blob: 06fb4a1b400e432b376a135d6b0e4e8763129b1f [file] [log] [blame]
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Mayank Rana511f3b22016-08-02 12:00:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/delay.h>
30#include <linux/of.h>
31#include <linux/of_platform.h>
32#include <linux/of_gpio.h>
33#include <linux/list.h>
34#include <linux/uaccess.h>
35#include <linux/usb/ch9.h>
36#include <linux/usb/gadget.h>
37#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070038#include <linux/regulator/consumer.h>
39#include <linux/pm_wakeup.h>
40#include <linux/power_supply.h>
41#include <linux/cdev.h>
42#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070043#include <linux/msm-bus.h>
44#include <linux/irq.h>
45#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053046#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070047#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070048
49#include "power.h"
50#include "core.h"
51#include "gadget.h"
52#include "dbm.h"
53#include "debug.h"
54#include "xhci.h"
55
/* time out to wait for USB cable status notification (in ms) */
57#define SM_INIT_TIMEOUT 30000
58
59/* AHB2PHY register offsets */
60#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
61
/* AHB2PHY read/write wait value */
63#define ONE_READ_WRITE_WAIT 0x11
64
65/* cpu to fix usb interrupt */
66static int cpu_to_affin;
67module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
68MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
69
70/* XHCI registers */
71#define USB3_HCSPARAMS1 (0x4)
72#define USB3_PORTSC (0x420)
73
74/**
75 * USB QSCRATCH Hardware registers
76 *
77 */
78#define QSCRATCH_REG_OFFSET (0x000F8800)
79#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
80#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
81#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
82#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
83
84#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
85#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
86#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
87#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
88#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
89
90/* QSCRATCH_GENERAL_CFG register bit offset */
91#define PIPE_UTMI_CLK_SEL BIT(0)
92#define PIPE3_PHYSTATUS_SW BIT(3)
93#define PIPE_UTMI_CLK_DIS BIT(8)
94
95#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
96#define UTMI_OTG_VBUS_VALID BIT(20)
97#define SW_SESSVLD_SEL BIT(28)
98
99#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
100#define LANE0_PWR_PRESENT BIT(24)
101
102/* GSI related registers */
103#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
104#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
105
106#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
107#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
108#define GSI_CLK_EN_MASK BIT(12)
109#define BLOCK_GSI_WR_GO_MASK BIT(1)
110#define GSI_EN_MASK BIT(0)
111
112#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
113#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
114#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
115#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
116
117#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
118#define GSI_WR_CTRL_STATE_MASK BIT(15)
119
Mayank Ranaf4918d32016-12-15 13:35:55 -0800120#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
121#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
122#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
123#define DWC3_GEVENT_TYPE_GSI 0x3
124
/*
 * Bookkeeping node for a request whose ->complete callback has been
 * overridden with dwc3_msm_req_complete_func(); stores the original
 * callback so it can be restored and invoked on completion.
 */
struct dwc3_msm_req_complete {
	struct list_head list_item;
	struct usb_request *req;
	/* caller's original completion callback, restored on giveback */
	void (*orig_complete)(struct usb_ep *ep,
			struct usb_request *req);
};
131
/*
 * State of the USB ID pin. NOTE(review): GROUND conventionally selects
 * host role and FLOAT device role in OTG designs — confirm against the
 * driver's state machine, which is outside this chunk.
 */
enum dwc3_id_state {
	DWC3_ID_GROUND = 0,
	DWC3_ID_FLOAT,
};
136
/* for type c cable: which CC line the plug orientation was detected on */
enum plug_orientation {
	ORIENTATION_NONE,
	ORIENTATION_CC1,
	ORIENTATION_CC2,
};
143
144/* Input bits to state machine (mdwc->inputs) */
145
146#define ID 0
147#define B_SESS_VLD 1
148#define B_SUSPEND 2
149
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530150#define PM_QOS_SAMPLE_SEC 2
151#define PM_QOS_THRESHOLD 400
152
/*
 * Per-instance state for the MSM glue layer around the DWC3 core:
 * clocks/regulators/resets, PHY handles, DBM and GSI bookkeeping,
 * extcon-driven role detection, and power-management state.
 */
struct dwc3_msm {
	struct device *dev;
	void __iomem *base;		/* DWC3 core + QSCRATCH registers */
	void __iomem *ahb2phy_base;
	struct platform_device	*dwc3;	/* child DWC3 core device */
	/* saved ep_ops so SPS/DBM requests can fall back to the core queue */
	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
	struct list_head req_complete_list;
	struct clk		*xo_clk;
	struct clk		*core_clk;
	long			core_clk_rate;
	long			core_clk_rate_hs;	/* rate used in HS-only mode */
	struct clk		*iface_clk;
	struct clk		*sleep_clk;
	struct clk		*utmi_clk;
	unsigned int		utmi_clk_rate;
	struct clk		*utmi_clk_src;
	struct clk		*bus_aggr_clk;
	struct clk		*noc_aggr_clk;
	struct clk		*cfg_ahb_clk;
	struct reset_control	*core_reset;
	struct regulator	*dwc3_gdsc;

	struct usb_phy		*hs_phy, *ss_phy;

	struct dbm		*dbm;

	/* VBUS regulator for host mode */
	struct regulator	*vbus_reg;
	int			vbus_retry_count;
	bool			resume_pending;
	atomic_t		pm_suspended;
	int			hs_phy_irq;
	int			ss_phy_irq;
	struct work_struct	resume_work;
	struct work_struct	restart_usb_work;
	bool			in_restart;
	struct workqueue_struct *dwc3_wq;
	struct delayed_work	sm_work;	/* OTG state machine work */
	unsigned long		inputs;		/* ID/B_SESS_VLD/B_SUSPEND bits */
	unsigned int		max_power;
	bool			charging_disabled;
	enum usb_otg_state	otg_state;
	struct work_struct	bus_vote_w;
	unsigned int		bus_vote;
	u32			bus_perf_client;
	struct msm_bus_scale_pdata	*bus_scale_table;
	struct power_supply	*usb_psy;
	struct work_struct	vbus_draw_work;
	bool			in_host_mode;
	enum usb_device_speed	max_rh_port_speed;
	unsigned int		tx_fifo_size;
	bool			vbus_active;
	bool			suspend;
	bool			disable_host_mode_pm;
	enum dwc3_id_state	id_state;
	unsigned long		lpm_flags;	/* low-power-mode status bits below */
#define MDWC3_SS_PHY_SUSPEND		BIT(0)
#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY	BIT(1)
#define MDWC3_POWER_COLLAPSE		BIT(2)

	unsigned int		irq_to_affin;	/* see cpu_to_affin module param */
	struct notifier_block	dwc3_cpu_notifier;

	struct extcon_dev	*extcon_vbus;
	struct extcon_dev	*extcon_id;
	struct extcon_dev	*extcon_eud;
	struct notifier_block	vbus_nb;
	struct notifier_block	id_nb;
	struct notifier_block	eud_event_nb;

	struct notifier_block	host_nb;

	int			pwr_event_irq;
	atomic_t		in_p3;		/* SS PHY in P3 power state */
	unsigned int		lpm_to_suspend_delay;
	bool			init;
	enum plug_orientation	typec_orientation;
	u32			num_gsi_event_buffers;
	struct dwc3_event_buffer **gsi_ev_buff;
	int pm_qos_latency;
	struct pm_qos_request pm_qos_req_dma;
	struct delayed_work perf_vote_work;
};
236
237#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
238#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
239#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
240
241#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
242#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
243#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
244
245#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
246#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
247#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
248
249#define DSTS_CONNECTSPD_SS 0x4
250
251
252static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
253static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800254static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Rana511f3b22016-08-02 12:00:11 -0700255/**
256 *
257 * Read register with debug info.
258 *
259 * @base - DWC3 base virtual address.
260 * @offset - register offset.
261 *
262 * @return u32
263 */
264static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
265{
266 u32 val = ioread32(base + offset);
267 return val;
268}
269
270/**
271 * Read register masked field with debug info.
272 *
273 * @base - DWC3 base virtual address.
274 * @offset - register offset.
275 * @mask - register bitmask.
276 *
277 * @return u32
278 */
279static inline u32 dwc3_msm_read_reg_field(void *base,
280 u32 offset,
281 const u32 mask)
282{
283 u32 shift = find_first_bit((void *)&mask, 32);
284 u32 val = ioread32(base + offset);
285
286 val &= mask; /* clear other bits */
287 val >>= shift;
288 return val;
289}
290
/**
 * Write a 32-bit register.
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 * @val - value to write.
 */
static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}
304
305/**
306 * Write register masked field with debug info.
307 *
308 * @base - DWC3 base virtual address.
309 * @offset - register offset.
310 * @mask - register bitmask.
311 * @val - value to write.
312 *
313 */
314static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
315 const u32 mask, u32 val)
316{
317 u32 shift = find_first_bit((void *)&mask, 32);
318 u32 tmp = ioread32(base + offset);
319
320 tmp &= ~mask; /* clear written bits */
321 val = tmp | (val << shift);
322 iowrite32(val, base + offset);
323}
324
325/**
326 * Write register and read back masked value to confirm it is written
327 *
328 * @base - DWC3 base virtual address.
329 * @offset - register offset.
330 * @mask - register bitmask specifying what should be updated
331 * @val - value to write.
332 *
333 */
334static inline void dwc3_msm_write_readback(void *base, u32 offset,
335 const u32 mask, u32 val)
336{
337 u32 write_val, tmp = ioread32(base + offset);
338
339 tmp &= ~mask; /* retain other bits */
340 write_val = tmp | val;
341
342 iowrite32(write_val, base + offset);
343
344 /* Read back to see if val was written */
345 tmp = ioread32(base + offset);
346 tmp &= mask; /* clear other bits */
347
348 if (tmp != val)
349 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
350 __func__, val, offset);
351}
352
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800353static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
354{
355 int i, num_ports;
356 u32 reg;
357
358 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
359 num_ports = HCS_MAX_PORTS(reg);
360
361 for (i = 0; i < num_ports; i++) {
362 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
363 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
364 return true;
365 }
366
367 return false;
368}
369
Mayank Rana511f3b22016-08-02 12:00:11 -0700370static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
371{
372 int i, num_ports;
373 u32 reg;
374
375 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
376 num_ports = HCS_MAX_PORTS(reg);
377
378 for (i = 0; i < num_ports; i++) {
379 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
380 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
381 return true;
382 }
383
384 return false;
385}
386
387static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
388{
389 u8 speed;
390
391 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
392 return !!(speed & DSTS_CONNECTSPD_SS);
393}
394
395static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
396{
397 if (mdwc->in_host_mode)
398 return dwc3_msm_is_host_superspeed(mdwc);
399
400 return dwc3_msm_is_dev_superspeed(mdwc);
401}
402
403#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
404/**
405 * Configure the DBM with the BAM's data fifo.
406 * This function is called by the USB BAM Driver
407 * upon initialization.
408 *
409 * @ep - pointer to usb endpoint.
410 * @addr - address of data fifo.
411 * @size - size of data fifo.
412 *
413 */
414int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
415 u32 size, u8 dst_pipe_idx)
416{
417 struct dwc3_ep *dep = to_dwc3_ep(ep);
418 struct dwc3 *dwc = dep->dwc;
419 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
420
421 dev_dbg(mdwc->dev, "%s\n", __func__);
422
423 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
424 dst_pipe_idx);
425}
426
427
/**
 * Cleanups for msm endpoint on request complete.
 *
 * Restores and invokes the request's original ->complete callback,
 * releases the extra TRB that the DBM queue consumed, and unconfigures
 * the DBM endpoint when it was the last one in use.
 *
 * Called with dwc->lock held (see dwc3_gadget_giveback()).
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to usb_request instance.
 */
static void dwc3_msm_req_complete_func(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_msm_req_complete *req_complete = NULL;

	/* Find original request complete function and remove it from list */
	list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
		if (req_complete->req == request)
			break;
	}
	/*
	 * NOTE(review): after list_for_each_entry, req_complete is never
	 * NULL — on a full traversal it points at the list-head container,
	 * so the ->req comparison below is what actually catches the
	 * "not found" case; the NULL check is vestigial.
	 */
	if (!req_complete || req_complete->req != request) {
		dev_err(dep->dwc->dev, "%s: could not find the request\n",
					__func__);
		return;
	}
	list_del(&req_complete->list_item);

	/*
	 * Release another one TRB to the pool since DBM queue took 2 TRBs
	 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
	 * released only one.
	 */
	dep->trb_dequeue++;

	/* Unconfigure dbm ep */
	dbm_ep_unconfig(mdwc->dbm, dep->number);

	/*
	 * If this is the last endpoint we unconfigured, than reset also
	 * the event buffers; unless unconfiguring the ep due to lpm,
	 * in which case the event buffer only gets reset during the
	 * block reset.
	 */
	if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
		!dbm_reset_ep_after_lpm(mdwc->dbm))
		dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);

	/*
	 * Call original complete function, notice that dwc->lock is already
	 * taken by the caller of this function (dwc3_gadget_giveback()).
	 */
	request->complete = req_complete->orig_complete;
	if (request->complete)
		request->complete(ep, request);

	kfree(req_complete);
}
488
489
490/**
491* Helper function
492*
493* Reset DBM endpoint.
494*
495* @mdwc - pointer to dwc3_msm instance.
496* @dep - pointer to dwc3_ep instance.
497*
498* @return int - 0 on success, negative on error.
499*/
500static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
501{
502 int ret;
503
504 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
505
506 /* Reset the dbm endpoint */
507 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
508 if (ret) {
509 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
510 __func__);
511 return ret;
512 }
513
514 /*
515 * The necessary delay between asserting and deasserting the dbm ep
516 * reset is based on the number of active endpoints. If there is more
517 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
518 * delay will suffice.
519 */
520 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
521 usleep_range(1000, 1200);
522 else
523 udelay(10);
524 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
525 if (ret) {
526 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
527 __func__);
528 return ret;
529 }
530
531 return 0;
532}
533
534/**
535* Reset the DBM endpoint which is linked to the given USB endpoint.
536*
537* @usb_ep - pointer to usb_ep instance.
538*
539* @return int - 0 on success, negative on error.
540*/
541
542int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
543{
544 struct dwc3_ep *dep = to_dwc3_ep(ep);
545 struct dwc3 *dwc = dep->dwc;
546 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
547
548 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
549}
550EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
551
552
553/**
554* Helper function.
555* See the header of the dwc3_msm_ep_queue function.
556*
557* @dwc3_ep - pointer to dwc3_ep instance.
558* @req - pointer to dwc3_request instance.
559*
560* @return int - 0 on success, negative on error.
561*/
562static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
563{
564 struct dwc3_trb *trb;
565 struct dwc3_trb *trb_link;
566 struct dwc3_gadget_ep_cmd_params params;
567 u32 cmd;
568 int ret = 0;
569
Mayank Rana83ad5822016-08-09 14:17:22 -0700570 /* We push the request to the dep->started_list list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700571 * this request is issued with start transfer. The request will be out
572 * from this list in 2 cases. The first is that the transfer will be
573 * completed (not if the transfer is endless using a circular TRBs with
574 * with link TRB). The second case is an option to do stop stransfer,
575 * this can be initiated by the function driver when calling dequeue.
576 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700577 req->started = true;
578 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700579
580 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana83ad5822016-08-09 14:17:22 -0700581 trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
582 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700583 memset(trb, 0, sizeof(*trb));
584
585 req->trb = trb;
586 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
587 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
588 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
589 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
590 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
591
592 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana83ad5822016-08-09 14:17:22 -0700593 trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
594 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700595 memset(trb_link, 0, sizeof(*trb_link));
596
597 trb_link->bpl = lower_32_bits(req->trb_dma);
598 trb_link->bph = DBM_TRB_BIT |
599 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
600 trb_link->size = 0;
601 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
602
603 /*
604 * Now start the transfer
605 */
606 memset(&params, 0, sizeof(params));
607 params.param0 = 0; /* TDAddr High */
608 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
609
610 /* DBM requires IOC to be set */
611 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700612 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700613 if (ret < 0) {
614 dev_dbg(dep->dwc->dev,
615 "%s: failed to send STARTTRANSFER command\n",
616 __func__);
617
618 list_del(&req->list);
619 return ret;
620 }
621 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700622 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700623
624 return ret;
625}
626
627/**
628* Queue a usb request to the DBM endpoint.
629* This function should be called after the endpoint
630* was enabled by the ep_enable.
631*
632* This function prepares special structure of TRBs which
633* is familiar with the DBM HW, so it will possible to use
634* this endpoint in DBM mode.
635*
636* The TRBs prepared by this function, is one normal TRB
637* which point to a fake buffer, followed by a link TRB
638* that points to the first TRB.
639*
640* The API of this function follow the regular API of
641* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
642*
643* @usb_ep - pointer to usb_ep instance.
644* @request - pointer to usb_request instance.
645* @gfp_flags - possible flags.
646*
647* @return int - 0 on success, negative on error.
648*/
649static int dwc3_msm_ep_queue(struct usb_ep *ep,
650 struct usb_request *request, gfp_t gfp_flags)
651{
652 struct dwc3_request *req = to_dwc3_request(request);
653 struct dwc3_ep *dep = to_dwc3_ep(ep);
654 struct dwc3 *dwc = dep->dwc;
655 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
656 struct dwc3_msm_req_complete *req_complete;
657 unsigned long flags;
658 int ret = 0, size;
659 u8 bam_pipe;
660 bool producer;
661 bool disable_wb;
662 bool internal_mem;
663 bool ioc;
664 bool superspeed;
665
666 if (!(request->udc_priv & MSM_SPS_MODE)) {
667 /* Not SPS mode, call original queue */
668 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
669 __func__);
670
671 return (mdwc->original_ep_ops[dep->number])->queue(ep,
672 request,
673 gfp_flags);
674 }
675
676 /* HW restriction regarding TRB size (8KB) */
677 if (req->request.length < 0x2000) {
678 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
679 return -EINVAL;
680 }
681
682 /*
683 * Override req->complete function, but before doing that,
684 * store it's original pointer in the req_complete_list.
685 */
686 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
687 if (!req_complete)
688 return -ENOMEM;
689
690 req_complete->req = request;
691 req_complete->orig_complete = request->complete;
692 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
693 request->complete = dwc3_msm_req_complete_func;
694
695 /*
696 * Configure the DBM endpoint
697 */
698 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
699 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
700 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
701 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
702 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
703
704 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
705 disable_wb, internal_mem, ioc);
706 if (ret < 0) {
707 dev_err(mdwc->dev,
708 "error %d after calling dbm_ep_config\n", ret);
709 return ret;
710 }
711
712 dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
713 __func__, request, ep->name, request->length);
714 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
715 dbm_event_buffer_config(mdwc->dbm,
716 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
717 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
718 DWC3_GEVNTSIZ_SIZE(size));
719
720 /*
721 * We must obtain the lock of the dwc3 core driver,
722 * including disabling interrupts, so we will be sure
723 * that we are the only ones that configure the HW device
724 * core and ensure that we queuing the request will finish
725 * as soon as possible so we will release back the lock.
726 */
727 spin_lock_irqsave(&dwc->lock, flags);
728 if (!dep->endpoint.desc) {
729 dev_err(mdwc->dev,
730 "%s: trying to queue request %p to disabled ep %s\n",
731 __func__, request, ep->name);
732 ret = -EPERM;
733 goto err;
734 }
735
736 if (dep->number == 0 || dep->number == 1) {
737 dev_err(mdwc->dev,
738 "%s: trying to queue dbm request %p to control ep %s\n",
739 __func__, request, ep->name);
740 ret = -EPERM;
741 goto err;
742 }
743
744
Mayank Rana83ad5822016-08-09 14:17:22 -0700745 if (dep->trb_dequeue != dep->trb_enqueue ||
746 !list_empty(&dep->pending_list)
747 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700748 dev_err(mdwc->dev,
749 "%s: trying to queue dbm request %p tp ep %s\n",
750 __func__, request, ep->name);
751 ret = -EPERM;
752 goto err;
753 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700754 dep->trb_dequeue = 0;
755 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700756 }
757
758 ret = __dwc3_msm_ep_queue(dep, req);
759 if (ret < 0) {
760 dev_err(mdwc->dev,
761 "error %d after calling __dwc3_msm_ep_queue\n", ret);
762 goto err;
763 }
764
765 spin_unlock_irqrestore(&dwc->lock, flags);
766 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
767 dbm_set_speed(mdwc->dbm, (u8)superspeed);
768
769 return 0;
770
771err:
772 spin_unlock_irqrestore(&dwc->lock, flags);
773 kfree(req_complete);
774 return ret;
775}
776
/*
* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
* (see gsi_startxfer_for_ep(), which fills dep->resource_index).
*
* @usb_ep - pointer to usb_ep instance.
*
* @return int - XferRscIndex
*/
static inline int gsi_get_xfer_index(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	return dep->resource_index;
}
790
/*
* Fills up the GSI channel information needed in call to IPA driver
* for GSI channel creation: physical register addresses (DEPCMD,
* GEVNTCOUNT), transfer-ring base/length and last-TRB address.
*
* @usb_ep - pointer to usb_ep instance.
* @ch_info - output parameter with requested channel info
*/
static void gsi_get_channel_info(struct usb_ep *ep,
			struct gsi_channel_info *ch_info)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	int last_trb_index = 0;
	struct dwc3 *dwc = dep->dwc;
	struct usb_gsi_request *request = ch_info->ch_req;

	/* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
	ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
						DWC3_DEPCMD);
	ch_info->depcmd_hi_addr = 0;

	ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
							&dep->trb_pool[0]);
	/* Convert to multiples of 1KB */
	ch_info->const_buffer_size = request->buf_len/1024;

	/* IN direction */
	if (dep->direction) {
		/*
		 * Multiply by size of each TRB for xfer_ring_len in bytes.
		 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
		 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
		 */
		ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
		last_trb_index = 2 * request->num_bufs + 2;
	} else { /* OUT direction */
		/*
		 * Multiply by size of each TRB for xfer_ring_len in bytes.
		 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
		 * LINK TRB.
		 */
		ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
		last_trb_index = request->num_bufs + 1;
	}

	/* Store last 16 bits of LINK TRB address as per GSI hw requirement */
	ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
			&dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
	ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
			DWC3_GEVNTCOUNT(ep->ep_intr_num));
	ch_info->gevntcount_hi_addr = 0;

	dev_dbg(dwc->dev,
	"depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
	ch_info->depcmd_low_addr, ch_info->last_trb_addr,
	ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
}
847
/*
* Perform StartXfer on GSI EP. Stores XferRscIndex.
*
* @usb_ep - pointer to usb_ep instance.
*
* @return int - 0 on success
*/
static int gsi_startxfer_for_ep(struct usb_ep *ep)
{
	int ret;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	memset(&params, 0, sizeof(params));
	/* TRB address bits 53/55 flags plus the EP's GSI interrupter number */
	params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
	params.param0 |= (ep->ep_intr_num << 16);
	params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
						&dep->trb_pool[0]));
	cmd = DWC3_DEPCMD_STARTTRANSFER;
	cmd |= DWC3_DEPCMD_PARAM(0);
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);

	if (ret < 0)
		dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
	/*
	 * NOTE(review): resource_index is fetched even when the command
	 * failed — confirm callers treat it as valid only when ret == 0.
	 */
	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
	dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
	return ret;
}
878
879/*
880* Store Ring Base and Doorbell Address for GSI EP
881* for GSI channel creation.
882*
883* @usb_ep - pointer to usb_ep instance.
884* @dbl_addr - Doorbell address obtained from IPA driver
885*/
886static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
887{
888 struct dwc3_ep *dep = to_dwc3_ep(ep);
889 struct dwc3 *dwc = dep->dwc;
890 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
891 int n = ep->ep_intr_num - 1;
892
893 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
894 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
895 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
896
897 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
898 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
899 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
900 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
901}
902
903/*
904* Rings Doorbell for IN GSI Channel
905*
906* @usb_ep - pointer to usb_ep instance.
907* @request - pointer to GSI request. This is used to pass in the
908* address of the GSI doorbell obtained from IPA driver
909*/
910static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
911{
912 void __iomem *gsi_dbl_address_lsb;
913 void __iomem *gsi_dbl_address_msb;
914 dma_addr_t offset;
915 u64 dbl_addr = *((u64 *)request->buf_base_addr);
916 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
917 u32 dbl_hi_addr = (dbl_addr >> 32);
918 u32 num_trbs = (request->num_bufs * 2 + 2);
919 struct dwc3_ep *dep = to_dwc3_ep(ep);
920 struct dwc3 *dwc = dep->dwc;
921 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
922
923 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
924 dbl_lo_addr, sizeof(u32));
925 if (!gsi_dbl_address_lsb)
926 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
927
928 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
929 dbl_hi_addr, sizeof(u32));
930 if (!gsi_dbl_address_msb)
931 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
932
933 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
934 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
935 &offset, gsi_dbl_address_lsb, dbl_lo_addr);
936
937 writel_relaxed(offset, gsi_dbl_address_lsb);
938 writel_relaxed(0, gsi_dbl_address_msb);
939}
940
941/*
942* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
943*
944* @usb_ep - pointer to usb_ep instance.
945* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
946*
947* @return int - 0 on success
948*/
949static int gsi_updatexfer_for_ep(struct usb_ep *ep,
950 struct usb_gsi_request *request)
951{
952 int i;
953 int ret;
954 u32 cmd;
955 int num_trbs = request->num_bufs + 1;
956 struct dwc3_trb *trb;
957 struct dwc3_gadget_ep_cmd_params params;
958 struct dwc3_ep *dep = to_dwc3_ep(ep);
959 struct dwc3 *dwc = dep->dwc;
960
961 for (i = 0; i < num_trbs - 1; i++) {
962 trb = &dep->trb_pool[i];
963 trb->ctrl |= DWC3_TRB_CTRL_HWO;
964 }
965
966 memset(&params, 0, sizeof(params));
967 cmd = DWC3_DEPCMD_UPDATETRANSFER;
968 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -0700969 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700970 dep->flags |= DWC3_EP_BUSY;
971 if (ret < 0)
972 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
973 return ret;
974}
975
976/*
977* Perform EndXfer on particular GSI EP.
978*
979* @usb_ep - pointer to usb_ep instance.
980*/
981static void gsi_endxfer_for_ep(struct usb_ep *ep)
982{
983 struct dwc3_ep *dep = to_dwc3_ep(ep);
984 struct dwc3 *dwc = dep->dwc;
985
986 dwc3_stop_active_transfer(dwc, dep->number, true);
987}
988
989/*
990* Allocates and configures TRBs for GSI EPs.
991*
992* @usb_ep - pointer to usb_ep instance.
993* @request - pointer to GSI request.
994*
995* @return int - 0 on success
996*/
997static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
998{
999 int i = 0;
1000 dma_addr_t buffer_addr = req->dma;
1001 struct dwc3_ep *dep = to_dwc3_ep(ep);
1002 struct dwc3 *dwc = dep->dwc;
1003 struct dwc3_trb *trb;
1004 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
1005 : (req->num_bufs + 1);
1006
1007 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
1008 num_trbs * sizeof(struct dwc3_trb),
1009 num_trbs * sizeof(struct dwc3_trb), 0);
1010 if (!dep->trb_dma_pool) {
1011 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1012 dep->name);
1013 return -ENOMEM;
1014 }
1015
1016 dep->num_trbs = num_trbs;
1017
1018 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
1019 GFP_KERNEL, &dep->trb_pool_dma);
1020 if (!dep->trb_pool) {
1021 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
1022 dep->name);
1023 return -ENOMEM;
1024 }
1025
1026 /* IN direction */
1027 if (dep->direction) {
1028 for (i = 0; i < num_trbs ; i++) {
1029 trb = &dep->trb_pool[i];
1030 memset(trb, 0, sizeof(*trb));
1031 /* Set up first n+1 TRBs for ZLPs */
1032 if (i < (req->num_bufs + 1)) {
1033 trb->bpl = 0;
1034 trb->bph = 0;
1035 trb->size = 0;
1036 trb->ctrl = DWC3_TRBCTL_NORMAL
1037 | DWC3_TRB_CTRL_IOC;
1038 continue;
1039 }
1040
1041 /* Setup n TRBs pointing to valid buffers */
1042 trb->bpl = lower_32_bits(buffer_addr);
1043 trb->bph = 0;
1044 trb->size = 0;
1045 trb->ctrl = DWC3_TRBCTL_NORMAL
1046 | DWC3_TRB_CTRL_IOC;
1047 buffer_addr += req->buf_len;
1048
1049 /* Set up the Link TRB at the end */
1050 if (i == (num_trbs - 1)) {
1051 trb->bpl = dwc3_trb_dma_offset(dep,
1052 &dep->trb_pool[0]);
1053 trb->bph = (1 << 23) | (1 << 21)
1054 | (ep->ep_intr_num << 16);
1055 trb->size = 0;
1056 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1057 | DWC3_TRB_CTRL_HWO;
1058 }
1059 }
1060 } else { /* OUT direction */
1061
1062 for (i = 0; i < num_trbs ; i++) {
1063
1064 trb = &dep->trb_pool[i];
1065 memset(trb, 0, sizeof(*trb));
1066 trb->bpl = lower_32_bits(buffer_addr);
1067 trb->bph = 0;
1068 trb->size = req->buf_len;
1069 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
1070 | DWC3_TRB_CTRL_CSP
1071 | DWC3_TRB_CTRL_ISP_IMI;
1072 buffer_addr += req->buf_len;
1073
1074 /* Set up the Link TRB at the end */
1075 if (i == (num_trbs - 1)) {
1076 trb->bpl = dwc3_trb_dma_offset(dep,
1077 &dep->trb_pool[0]);
1078 trb->bph = (1 << 23) | (1 << 21)
1079 | (ep->ep_intr_num << 16);
1080 trb->size = 0;
1081 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1082 | DWC3_TRB_CTRL_HWO;
1083 }
1084 }
1085 }
1086 return 0;
1087}
1088
1089/*
1090* Frees TRBs for GSI EPs.
1091*
1092* @usb_ep - pointer to usb_ep instance.
1093*
1094*/
1095static void gsi_free_trbs(struct usb_ep *ep)
1096{
1097 struct dwc3_ep *dep = to_dwc3_ep(ep);
1098
1099 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1100 return;
1101
1102 /* Free TRBs and TRB pool for EP */
1103 if (dep->trb_dma_pool) {
1104 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1105 dep->trb_pool_dma);
1106 dma_pool_destroy(dep->trb_dma_pool);
1107 dep->trb_pool = NULL;
1108 dep->trb_pool_dma = 0;
1109 dep->trb_dma_pool = NULL;
1110 }
1111}
1112/*
1113* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1114*
1115* @usb_ep - pointer to usb_ep instance.
1116* @request - pointer to GSI request.
1117*/
1118static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1119{
1120 struct dwc3_ep *dep = to_dwc3_ep(ep);
1121 struct dwc3 *dwc = dep->dwc;
1122 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1123 struct dwc3_gadget_ep_cmd_params params;
1124 const struct usb_endpoint_descriptor *desc = ep->desc;
1125 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
1126 u32 reg;
1127
1128 memset(&params, 0x00, sizeof(params));
1129
1130 /* Configure GSI EP */
1131 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1132 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1133
1134 /* Burst size is only needed in SuperSpeed mode */
1135 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1136 u32 burst = dep->endpoint.maxburst - 1;
1137
1138 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1139 }
1140
1141 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1142 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1143 | DWC3_DEPCFG_STREAM_EVENT_EN;
1144 dep->stream_capable = true;
1145 }
1146
1147 /* Set EP number */
1148 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1149
1150 /* Set interrupter number for GSI endpoints */
1151 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1152
1153 /* Enable XferInProgress and XferComplete Interrupts */
1154 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1155 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1156 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1157 /*
1158 * We must use the lower 16 TX FIFOs even though
1159 * HW might have more
1160 */
1161 /* Remove FIFO Number for GSI EP*/
1162 if (dep->direction)
1163 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1164
1165 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1166
1167 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1168 params.param0, params.param1, params.param2, dep->name);
1169
Mayank Rana83ad5822016-08-09 14:17:22 -07001170 dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001171
1172 /* Set XferRsc Index for GSI EP */
1173 if (!(dep->flags & DWC3_EP_ENABLED)) {
1174 memset(&params, 0x00, sizeof(params));
1175 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
Mayank Rana83ad5822016-08-09 14:17:22 -07001176 dwc3_send_gadget_ep_cmd(dep,
Mayank Rana511f3b22016-08-02 12:00:11 -07001177 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1178
1179 dep->endpoint.desc = desc;
1180 dep->comp_desc = comp_desc;
1181 dep->type = usb_endpoint_type(desc);
1182 dep->flags |= DWC3_EP_ENABLED;
1183 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1184 reg |= DWC3_DALEPENA_EP(dep->number);
1185 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1186 }
1187
1188}
1189
1190/*
1191* Enables USB wrapper for GSI
1192*
1193* @usb_ep - pointer to usb_ep instance.
1194*/
1195static void gsi_enable(struct usb_ep *ep)
1196{
1197 struct dwc3_ep *dep = to_dwc3_ep(ep);
1198 struct dwc3 *dwc = dep->dwc;
1199 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1200
1201 dwc3_msm_write_reg_field(mdwc->base,
1202 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1203 dwc3_msm_write_reg_field(mdwc->base,
1204 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1205 dwc3_msm_write_reg_field(mdwc->base,
1206 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1207 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1208 dwc3_msm_write_reg_field(mdwc->base,
1209 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1210}
1211
1212/*
1213* Block or allow doorbell towards GSI
1214*
1215* @usb_ep - pointer to usb_ep instance.
1216* @request - pointer to GSI request. In this case num_bufs is used as a bool
1217* to set or clear the doorbell bit
1218*/
1219static void gsi_set_clear_dbell(struct usb_ep *ep,
1220 bool block_db)
1221{
1222
1223 struct dwc3_ep *dep = to_dwc3_ep(ep);
1224 struct dwc3 *dwc = dep->dwc;
1225 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1226
1227 dwc3_msm_write_reg_field(mdwc->base,
1228 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1229}
1230
1231/*
1232* Performs necessary checks before stopping GSI channels
1233*
1234* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1235*/
1236static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1237{
1238 u32 timeout = 1500;
1239 u32 reg = 0;
1240 struct dwc3_ep *dep = to_dwc3_ep(ep);
1241 struct dwc3 *dwc = dep->dwc;
1242 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1243
1244 while (dwc3_msm_read_reg_field(mdwc->base,
1245 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1246 if (!timeout--) {
1247 dev_err(mdwc->dev,
1248 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1249 return false;
1250 }
1251 }
1252 /* Check for U3 only if we are not handling Function Suspend */
1253 if (!f_suspend) {
1254 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1255 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1256 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1257 return false;
1258 }
1259 }
1260
1261 return true;
1262}
1263
1264
1265/**
1266* Performs GSI operations or GSI EP related operations.
1267*
1268* @usb_ep - pointer to usb_ep instance.
1269* @op_data - pointer to opcode related data.
1270* @op - GSI related or GSI EP related op code.
1271*
1272* @return int - 0 on success, negative on error.
1273* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1274*/
1275static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1276 void *op_data, enum gsi_ep_op op)
1277{
1278 u32 ret = 0;
1279 struct dwc3_ep *dep = to_dwc3_ep(ep);
1280 struct dwc3 *dwc = dep->dwc;
1281 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1282 struct usb_gsi_request *request;
1283 struct gsi_channel_info *ch_info;
1284 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001285 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001286
1287 switch (op) {
1288 case GSI_EP_OP_PREPARE_TRBS:
1289 request = (struct usb_gsi_request *)op_data;
1290 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1291 ret = gsi_prepare_trbs(ep, request);
1292 break;
1293 case GSI_EP_OP_FREE_TRBS:
1294 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1295 gsi_free_trbs(ep);
1296 break;
1297 case GSI_EP_OP_CONFIG:
1298 request = (struct usb_gsi_request *)op_data;
1299 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001300 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001301 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001302 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001303 break;
1304 case GSI_EP_OP_STARTXFER:
1305 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001306 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001307 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001308 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001309 break;
1310 case GSI_EP_OP_GET_XFER_IDX:
1311 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1312 ret = gsi_get_xfer_index(ep);
1313 break;
1314 case GSI_EP_OP_STORE_DBL_INFO:
1315 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1316 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1317 break;
1318 case GSI_EP_OP_ENABLE_GSI:
1319 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1320 gsi_enable(ep);
1321 break;
1322 case GSI_EP_OP_GET_CH_INFO:
1323 ch_info = (struct gsi_channel_info *)op_data;
1324 gsi_get_channel_info(ep, ch_info);
1325 break;
1326 case GSI_EP_OP_RING_IN_DB:
1327 request = (struct usb_gsi_request *)op_data;
1328 dev_dbg(mdwc->dev, "RING IN EP DB\n");
1329 gsi_ring_in_db(ep, request);
1330 break;
1331 case GSI_EP_OP_UPDATEXFER:
1332 request = (struct usb_gsi_request *)op_data;
1333 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001334 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001335 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001336 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001337 break;
1338 case GSI_EP_OP_ENDXFER:
1339 request = (struct usb_gsi_request *)op_data;
1340 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001341 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001342 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001343 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001344 break;
1345 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1346 block_db = *((bool *)op_data);
1347 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1348 block_db);
1349 gsi_set_clear_dbell(ep, block_db);
1350 break;
1351 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1352 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1353 f_suspend = *((bool *)op_data);
1354 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1355 break;
1356 case GSI_EP_OP_DISABLE:
1357 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1358 ret = ep->ops->disable(ep);
1359 break;
1360 default:
1361 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1362 }
1363
1364 return ret;
1365}
1366
1367/**
1368 * Configure MSM endpoint.
1369 * This function do specific configurations
1370 * to an endpoint which need specific implementaion
1371 * in the MSM architecture.
1372 *
1373 * This function should be called by usb function/class
1374 * layer which need a support from the specific MSM HW
1375 * which wrap the USB3 core. (like GSI or DBM specific endpoints)
1376 *
1377 * @ep - a pointer to some usb_ep instance
1378 *
1379 * @return int - 0 on success, negetive on error.
1380 */
1381int msm_ep_config(struct usb_ep *ep)
1382{
1383 struct dwc3_ep *dep = to_dwc3_ep(ep);
1384 struct dwc3 *dwc = dep->dwc;
1385 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1386 struct usb_ep_ops *new_ep_ops;
1387
1388
1389 /* Save original ep ops for future restore*/
1390 if (mdwc->original_ep_ops[dep->number]) {
1391 dev_err(mdwc->dev,
1392 "ep [%s,%d] already configured as msm endpoint\n",
1393 ep->name, dep->number);
1394 return -EPERM;
1395 }
1396 mdwc->original_ep_ops[dep->number] = ep->ops;
1397
1398 /* Set new usb ops as we like */
1399 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1400 if (!new_ep_ops)
1401 return -ENOMEM;
1402
1403 (*new_ep_ops) = (*ep->ops);
1404 new_ep_ops->queue = dwc3_msm_ep_queue;
1405 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1406 ep->ops = new_ep_ops;
1407
1408 /*
1409 * Do HERE more usb endpoint configurations
1410 * which are specific to MSM.
1411 */
1412
1413 return 0;
1414}
1415EXPORT_SYMBOL(msm_ep_config);
1416
1417/**
1418 * Un-configure MSM endpoint.
1419 * Tear down configurations done in the
1420 * dwc3_msm_ep_config function.
1421 *
1422 * @ep - a pointer to some usb_ep instance
1423 *
1424 * @return int - 0 on success, negative on error.
1425 */
1426int msm_ep_unconfig(struct usb_ep *ep)
1427{
1428 struct dwc3_ep *dep = to_dwc3_ep(ep);
1429 struct dwc3 *dwc = dep->dwc;
1430 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1431 struct usb_ep_ops *old_ep_ops;
1432
1433 /* Restore original ep ops */
1434 if (!mdwc->original_ep_ops[dep->number]) {
1435 dev_err(mdwc->dev,
1436 "ep [%s,%d] was not configured as msm endpoint\n",
1437 ep->name, dep->number);
1438 return -EINVAL;
1439 }
1440 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1441 ep->ops = mdwc->original_ep_ops[dep->number];
1442 mdwc->original_ep_ops[dep->number] = NULL;
1443 kfree(old_ep_ops);
1444
1445 /*
1446 * Do HERE more usb endpoint un-configurations
1447 * which are specific to MSM.
1448 */
1449
1450 return 0;
1451}
1452EXPORT_SYMBOL(msm_ep_unconfig);
1453#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1454
1455static void dwc3_resume_work(struct work_struct *w);
1456
/*
 * Worker that performs a full USB session restart: drop the current
 * connection, wait for runtime suspend, then reconnect if the cable
 * is still attached. Scheduled from the controller-error and
 * restart-session notification paths.
 */
static void dwc3_restart_usb_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
						restart_usb_work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	unsigned int timeout = 50;

	dev_dbg(mdwc->dev, "%s\n", __func__);

	/* Restart is only meaningful for an active dual-role controller */
	if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
		dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
		return;
	}

	/* guard against concurrent VBUS handling */
	mdwc->in_restart = true;

	if (!mdwc->vbus_active) {
		dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
		dwc->err_evt_seen = false;
		mdwc->in_restart = false;
		return;
	}

	dbg_event(0xFF, "RestartUSB", 0);
	/* Reset active USB connection */
	dwc3_resume_work(&mdwc->resume_work);

	/* Make sure disconnect is processed before sending connect */
	while (--timeout && !pm_runtime_suspended(mdwc->dev))
		msleep(20);

	if (!timeout) {
		/* ~1s elapsed without reaching LPM; force the suspend */
		dev_dbg(mdwc->dev,
			"Not in LPM after disconnect, forcing suspend...\n");
		dbg_event(0xFF, "ReStart:RT SUSP",
			atomic_read(&mdwc->dev->power.usage_count));
		pm_runtime_suspend(mdwc->dev);
	}

	mdwc->in_restart = false;
	/* Force reconnect only if cable is still connected */
	if (mdwc->vbus_active)
		dwc3_resume_work(&mdwc->resume_work);

	dwc->err_evt_seen = false;
	flush_delayed_work(&mdwc->sm_work);
}
1505
1506/*
1507 * Check whether the DWC3 requires resetting the ep
1508 * after going to Low Power Mode (lpm)
1509 */
1510bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1511{
1512 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1513 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1514
1515 return dbm_reset_ep_after_lpm(mdwc->dbm);
1516}
1517EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1518
1519/*
1520 * Config Global Distributed Switch Controller (GDSC)
1521 * to support controller power collapse
1522 */
1523static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1524{
1525 int ret;
1526
1527 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1528 return -EPERM;
1529
1530 if (on) {
1531 ret = regulator_enable(mdwc->dwc3_gdsc);
1532 if (ret) {
1533 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1534 return ret;
1535 }
1536 } else {
1537 ret = regulator_disable(mdwc->dwc3_gdsc);
1538 if (ret) {
1539 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1540 return ret;
1541 }
1542 }
1543
1544 return ret;
1545}
1546
/*
 * Assert or de-assert the DWC3 core block reset.
 *
 * On assert: disable the power-event IRQ and gate the link clocks
 * before pulling reset. On de-assert: release reset, wait, re-enable
 * clocks in reverse order, then re-enable the IRQ.
 *
 * @mdwc - driver context.
 * @assert - true to assert the reset, false to de-assert.
 * @return 0 on success or the reset controller's error code.
 */
static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
{
	int ret = 0;

	if (assert) {
		disable_irq(mdwc->pwr_event_irq);
		/* Using asynchronous block reset to the hardware */
		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
		clk_disable_unprepare(mdwc->utmi_clk);
		clk_disable_unprepare(mdwc->sleep_clk);
		clk_disable_unprepare(mdwc->core_clk);
		clk_disable_unprepare(mdwc->iface_clk);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
	} else {
		dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
		/* allow reset de-assertion to settle before ungating clocks */
		ndelay(200);
		clk_prepare_enable(mdwc->iface_clk);
		clk_prepare_enable(mdwc->core_clk);
		clk_prepare_enable(mdwc->sleep_clk);
		clk_prepare_enable(mdwc->utmi_clk);
		enable_irq(mdwc->pwr_event_irq);
	}

	return ret;
}
1577
/*
 * Program GUCTL.REFCLKPER and (on >= 2.50a cores) GFLADJ according to
 * the UTMI reference clock rate, per the Synopsys databook.
 */
static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
{
	u32 guctl, gfladj = 0;

	guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
	guctl &= ~DWC3_GUCTL_REFCLKPER;

	/* GFLADJ register is used starting with revision 2.50a */
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
		gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
		gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
	}

	/* Refer to SNPS Databook Table 6-55 for calculations used */
	switch (mdwc->utmi_clk_rate) {
	case 19200000:
		/* 52.08ns period; 240MHz decrement 12+1; FLADJ 200 */
		guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	case 24000000:
		/* 41.66ns period; 240MHz decrement 10; FLADJ 2032 */
		guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	default:
		dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
				mdwc->utmi_clk_rate);
		break;
	}

	dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
	if (gfladj)
		dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
}
1619
/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
{
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
		/* On older cores set XHCI_REV bit to specify revision 1.0 */
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
					 BIT(2), 1);

	/*
	 * Enable master clock for RAMs to allow BAM to access RAMs when
	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
	 * are seen where RAM clocks get turned OFF in SS mode
	 */
	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);

}
1637
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001638static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1639{
1640 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1641 vbus_draw_work);
1642 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1643
1644 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1645}
1646
Mayank Rana511f3b22016-08-02 12:00:11 -07001647static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1648{
1649 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001650 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001651 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001652 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001653
1654 switch (event) {
1655 case DWC3_CONTROLLER_ERROR_EVENT:
1656 dev_info(mdwc->dev,
1657 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1658 dwc->irq_cnt);
1659
1660 dwc3_gadget_disable_irq(dwc);
1661
1662 /* prevent core from generating interrupts until recovery */
1663 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1664 reg |= DWC3_GCTL_CORESOFTRESET;
1665 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1666
1667 /* restart USB which performs full reset and reconnect */
1668 schedule_work(&mdwc->restart_usb_work);
1669 break;
1670 case DWC3_CONTROLLER_RESET_EVENT:
1671 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1672 /* HS & SSPHYs get reset as part of core soft reset */
1673 dwc3_msm_qscratch_reg_init(mdwc);
1674 break;
1675 case DWC3_CONTROLLER_POST_RESET_EVENT:
1676 dev_dbg(mdwc->dev,
1677 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1678
1679 /*
1680 * Below sequence is used when controller is working without
1681 * having ssphy and only USB high speed is supported.
1682 */
1683 if (dwc->maximum_speed == USB_SPEED_HIGH) {
1684 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1685 dwc3_msm_read_reg(mdwc->base,
1686 QSCRATCH_GENERAL_CFG)
1687 | PIPE_UTMI_CLK_DIS);
1688
1689 usleep_range(2, 5);
1690
1691
1692 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1693 dwc3_msm_read_reg(mdwc->base,
1694 QSCRATCH_GENERAL_CFG)
1695 | PIPE_UTMI_CLK_SEL
1696 | PIPE3_PHYSTATUS_SW);
1697
1698 usleep_range(2, 5);
1699
1700 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1701 dwc3_msm_read_reg(mdwc->base,
1702 QSCRATCH_GENERAL_CFG)
1703 & ~PIPE_UTMI_CLK_DIS);
1704 }
1705
1706 dwc3_msm_update_ref_clk(mdwc);
1707 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1708 break;
1709 case DWC3_CONTROLLER_CONNDONE_EVENT:
1710 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1711 /*
1712 * Add power event if the dbm indicates coming out of L1 by
1713 * interrupt
1714 */
1715 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1716 dwc3_msm_write_reg_field(mdwc->base,
1717 PWR_EVNT_IRQ_MASK_REG,
1718 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1719
1720 atomic_set(&dwc->in_lpm, 0);
1721 break;
1722 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1723 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1724 if (dwc->enable_bus_suspend) {
1725 mdwc->suspend = dwc->b_suspend;
1726 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1727 }
1728 break;
1729 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1730 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001731 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001732 break;
1733 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1734 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001735 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001736 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001737 case DWC3_GSI_EVT_BUF_ALLOC:
1738 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1739
1740 if (!mdwc->num_gsi_event_buffers)
1741 break;
1742
1743 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1744 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1745 GFP_KERNEL);
1746 if (!mdwc->gsi_ev_buff) {
1747 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1748 break;
1749 }
1750
1751 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1752
1753 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1754 if (!evt)
1755 break;
1756 evt->dwc = dwc;
1757 evt->length = DWC3_EVENT_BUFFERS_SIZE;
1758 evt->buf = dma_alloc_coherent(dwc->dev,
1759 DWC3_EVENT_BUFFERS_SIZE,
1760 &evt->dma, GFP_KERNEL);
1761 if (!evt->buf) {
1762 dev_err(dwc->dev,
1763 "can't allocate gsi_evt_buf(%d)\n", i);
1764 break;
1765 }
1766 mdwc->gsi_ev_buff[i] = evt;
1767 }
1768 break;
1769 case DWC3_GSI_EVT_BUF_SETUP:
1770 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1771 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1772 evt = mdwc->gsi_ev_buff[i];
1773 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1774 evt->buf, (unsigned long long) evt->dma,
1775 evt->length);
1776 memset(evt->buf, 0, evt->length);
1777 evt->lpos = 0;
1778 /*
1779 * Primary event buffer is programmed with registers
1780 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1781 * program USB GSI related event buffer with DWC3
1782 * controller.
1783 */
1784 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1785 lower_32_bits(evt->dma));
1786 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1787 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1788 DWC3_GEVENT_TYPE_GSI) |
1789 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1790 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1791 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1792 ((evt->length) & 0xffff));
1793 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1794 }
1795 break;
1796 case DWC3_GSI_EVT_BUF_CLEANUP:
1797 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
1798 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1799 evt = mdwc->gsi_ev_buff[i];
1800 evt->lpos = 0;
1801 /*
1802 * Primary event buffer is programmed with registers
1803 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1804 * program USB GSI related event buffer with DWC3
1805 * controller.
1806 */
1807 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1808 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1809 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1810 DWC3_GEVNTSIZ_INTMASK |
1811 DWC3_GEVNTSIZ_SIZE((i+1)));
1812 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1813 }
1814 break;
1815 case DWC3_GSI_EVT_BUF_FREE:
1816 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
1817 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1818 evt = mdwc->gsi_ev_buff[i];
1819 if (evt)
1820 dma_free_coherent(dwc->dev, evt->length,
1821 evt->buf, evt->dma);
1822 }
1823 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001824 default:
1825 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1826 break;
1827 }
1828}
1829
/*
 * Reset the MSM USB block: optionally pulse the core link-clock reset,
 * then soft-reset and re-enable the DBM if one is present.
 *
 * @mdwc - driver context.
 * @core_reset - true to also assert/de-assert the core block reset.
 */
static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
{
	int ret = 0;

	if (core_reset) {
		ret = dwc3_msm_link_clk_reset(mdwc, 1);
		if (ret)
			return;

		usleep_range(1000, 1200);
		ret = dwc3_msm_link_clk_reset(mdwc, 0);
		if (ret)
			return;

		/* let the core settle after reset de-assertion */
		usleep_range(10000, 12000);
	}

	if (mdwc->dbm) {
		/* Reset the DBM */
		dbm_soft_reset(mdwc->dbm, 1);
		usleep_range(1000, 1200);
		dbm_soft_reset(mdwc->dbm, 0);

		/*enable DBM*/
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
			DBM_EN_MASK, 0x1);
		dbm_enable(mdwc->dbm);
	}
}
1859
/*
 * Power-on-reset handling after a controller power collapse:
 * configure the AHB2PHY bridge wait states, run one-time core
 * pre-init, then re-run core init and event buffer setup.
 */
static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	u32 val;
	int ret;

	/* Configure AHB2PHY for one wait state read/write */
	if (mdwc->ahb2phy_base) {
		clk_prepare_enable(mdwc->cfg_ahb_clk);
		val = readl_relaxed(mdwc->ahb2phy_base +
				PERIPH_SS_AHB2PHY_TOP_CFG);
		if (val != ONE_READ_WRITE_WAIT) {
			writel_relaxed(ONE_READ_WRITE_WAIT,
				mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
			/* complete above write before configuring USB PHY. */
			mb();
		}
		clk_disable_unprepare(mdwc->cfg_ahb_clk);
	}

	/* One-time controller pre-initialization on the first POR */
	if (!mdwc->init) {
		dbg_event(0xFF, "dwc3 init",
				atomic_read(&mdwc->dev->power.usage_count));
		ret = dwc3_core_pre_init(dwc);
		if (ret) {
			dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
			return;
		}
		mdwc->init = true;
	}

	dwc3_core_init(dwc);
	/* Re-configure event buffers */
	dwc3_event_buffers_setup(dwc);
}
1895
/*
 * Prepare the controller for low power mode: verify SS link is in P3
 * when applicable, then put the HS PHY into L2 and wait (up to 5ms)
 * for the transition to be reported.
 *
 * @return 0 on success, -EBUSY if the SS link is not in P3.
 */
static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
{
	unsigned long timeout;
	u32 reg = 0;

	if ((mdwc->in_host_mode || mdwc->vbus_active)
			&& dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
		if (!atomic_read(&mdwc->in_p3)) {
			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
			return -EBUSY;
		}
	}

	/* Clear previous L2 events */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);

	/* Prepare HSPHY for suspend */
	reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);

	/* Wait for PHY to go into L2 */
	timeout = jiffies + msecs_to_jiffies(5);
	while (!time_after(jiffies, timeout)) {
		reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
		if (reg & PWR_EVNT_LPM_IN_L2_MASK)
			break;
	}
	/* timeout here is non-fatal: log and continue the suspend */
	if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
		dev_err(mdwc->dev, "could not transition HS PHY to L2\n");

	/* Clear L2 event bit */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK);

	return 0;
}
1934
1935static void dwc3_msm_bus_vote_w(struct work_struct *w)
1936{
1937 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1938 int ret;
1939
1940 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1941 mdwc->bus_vote);
1942 if (ret)
1943 dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
1944}
1945
1946static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
1947{
1948 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1949 int i, num_ports;
1950 u32 reg;
1951
1952 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
1953 if (mdwc->in_host_mode) {
1954 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
1955 num_ports = HCS_MAX_PORTS(reg);
1956 for (i = 0; i < num_ports; i++) {
1957 reg = dwc3_msm_read_reg(mdwc->base,
1958 USB3_PORTSC + i*0x10);
1959 if (reg & PORT_PE) {
1960 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
1961 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1962 else if (DEV_LOWSPEED(reg))
1963 mdwc->hs_phy->flags |= PHY_LS_MODE;
1964 }
1965 }
1966 } else {
1967 if (dwc->gadget.speed == USB_SPEED_HIGH ||
1968 dwc->gadget.speed == USB_SPEED_FULL)
1969 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1970 else if (dwc->gadget.speed == USB_SPEED_LOW)
1971 mdwc->hs_phy->flags |= PHY_LS_MODE;
1972 }
1973}
1974
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05301975static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
1976 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07001977
/*
 * dwc3_msm_suspend - put the controller and PHYs into low power mode.
 *
 * Order matters throughout: abort checks first, then PHY suspend, then
 * clocks (core before iface), then optional power collapse, bus unvote,
 * and finally wakeup-IRQ arming. Mirrored by dwc3_msm_resume().
 *
 * Return: 0 on success, -EBUSY when LPM entry must be aborted, or the
 * error from dwc3_msm_prepare_suspend().
 */
static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
{
	int ret;
	bool can_suspend_ssphy;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct dwc3_event_buffer *evt;

	if (atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
		return 0;
	}

	/* Stop perf voting and drop to the low vote before sleeping */
	cancel_delayed_work_sync(&mdwc->perf_vote_work);
	msm_dwc3_perf_vote_update(mdwc, false);

	/* Device mode: refuse to sleep with unserviced controller events */
	if (!mdwc->in_host_mode) {
		evt = dwc->ev_buf;
		if ((evt->flags & DWC3_EVENT_PENDING)) {
			dev_dbg(mdwc->dev,
				"%s: %d device events pending, abort suspend\n",
				__func__, evt->count / 4);
			return -EBUSY;
		}
	}

	if (!mdwc->vbus_active && dwc->is_drd &&
		mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
		/*
		 * In some cases, the pm_runtime_suspend may be called by
		 * usb_bam when there is pending lpm flag. However, if this is
		 * done when cable was disconnected and otg state has not
		 * yet changed to IDLE, then it means OTG state machine
		 * is running and we race against it. So cancel LPM for now,
		 * and OTG state machine will go for LPM later, after completing
		 * transition to IDLE state.
		 */
		dev_dbg(mdwc->dev,
			"%s: cable disconnected while not in idle otg state\n",
			__func__);
		return -EBUSY;
	}

	/*
	 * Check if device is not in CONFIGURED state
	 * then check controller state of L2 and break
	 * LPM sequence. Check this for device bus suspend case.
	 */
	if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
		(dwc->gadget.state != USB_STATE_CONFIGURED)) {
		pr_err("%s(): Trying to go in LPM with state:%d\n",
					__func__, dwc->gadget.state);
		pr_err("%s(): LPM is not performed.\n", __func__);
		return -EBUSY;
	}

	ret = dwc3_msm_prepare_suspend(mdwc);
	if (ret)
		return ret;

	/* Initialize variables here */
	can_suspend_ssphy = !(mdwc->in_host_mode &&
				dwc3_msm_is_host_superspeed(mdwc));

	/* Disable core irq */
	if (dwc->irq)
		disable_irq(dwc->irq);

	/* disable power event irq, hs and ss phy irq is used as wake up src */
	disable_irq(mdwc->pwr_event_irq);

	dwc3_set_phy_speed_flags(mdwc);
	/* Suspend HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 1);

	/* Suspend SS PHY (skipped when a host-mode SS device is attached) */
	if (dwc->maximum_speed == USB_SPEED_SUPER && can_suspend_ssphy) {
		/* indicate phy about SS mode */
		if (dwc3_msm_is_superspeed(mdwc))
			mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
		usb_phy_set_suspend(mdwc->ss_phy, 1);
		mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
	}

	/* make sure above writes are completed before turning off clocks */
	wmb();

	/* Disable clocks */
	if (mdwc->bus_aggr_clk)
		clk_disable_unprepare(mdwc->bus_aggr_clk);
	clk_disable_unprepare(mdwc->utmi_clk);

	/* Memory core: OFF, Memory periphery: OFF */
	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
	}

	/* Park core_clk at the XO rate while idle */
	clk_set_rate(mdwc->core_clk, 19200000);
	clk_disable_unprepare(mdwc->core_clk);
	if (mdwc->noc_aggr_clk)
		clk_disable_unprepare(mdwc->noc_aggr_clk);
	/*
	 * Disable iface_clk only after core_clk as core_clk has FSM
	 * depedency on iface_clk. Hence iface_clk should be turned off
	 * after core_clk is turned off.
	 */
	clk_disable_unprepare(mdwc->iface_clk);
	/* USB PHY no more requires TCXO */
	clk_disable_unprepare(mdwc->xo_clk);

	/* Perform controller power collapse */
	if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 0);
		clk_disable_unprepare(mdwc->sleep_clk);
	}

	/* Remove bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 0;
		schedule_work(&mdwc->bus_vote_w);
	}

	/*
	 * release wakeup source with timeout to defer system suspend to
	 * handle case where on USB cable disconnect, SUSPEND and DISCONNECT
	 * event is received.
	 */
	if (mdwc->lpm_to_suspend_delay) {
		dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
					mdwc->lpm_to_suspend_delay);
		pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
	} else {
		pm_relax(mdwc->dev);
	}

	atomic_set(&dwc->in_lpm, 1);

	/*
	 * with DCP or during cable disconnect, we dont require wakeup
	 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
	 * case of host bus suspend and device bus suspend.
	 */
	if (mdwc->vbus_active || mdwc->in_host_mode) {
		enable_irq_wake(mdwc->hs_phy_irq);
		enable_irq(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			enable_irq_wake(mdwc->ss_phy_irq);
			enable_irq(mdwc->ss_phy_irq);
		}
		mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 in low power mode\n");
	return 0;
}
2135
/*
 * dwc3_msm_resume - bring the controller and PHYs out of low power mode.
 *
 * Inverse of dwc3_msm_suspend(): bus vote, TCXO, GDSC/reset recovery,
 * clocks (iface before core), PHY resume, optional power-on-reset
 * reprogramming, then IRQ re-enable and deferred power-event handling.
 *
 * Return: 0 always; individual step failures are logged, not propagated.
 */
static int dwc3_msm_resume(struct dwc3_msm *mdwc)
{
	int ret;
	long core_clk_rate;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);

	if (!atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
		return 0;
	}

	/* Hold a wakeup source while the controller is active */
	pm_stay_awake(mdwc->dev);

	/* Enable bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 1;
		schedule_work(&mdwc->bus_vote_w);
	}

	/* Vote for TCXO while waking up USB HSPHY */
	ret = clk_prepare_enable(mdwc->xo_clk);
	if (ret)
		dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
						__func__, ret);

	/* Restore controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 1);
		/* Pulse the core reset around GDSC power-up */
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset assert failed\n",
					__func__);
		/* HW requires a short delay for reset to take place properly */
		usleep_range(1000, 1200);
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
					__func__);
		clk_prepare_enable(mdwc->sleep_clk);
	}

	/*
	 * Enable clocks
	 * Turned ON iface_clk before core_clk due to FSM depedency.
	 */
	clk_prepare_enable(mdwc->iface_clk);
	if (mdwc->noc_aggr_clk)
		clk_prepare_enable(mdwc->noc_aggr_clk);

	/* Use the lower HS rate when only a high-speed device is attached */
	core_clk_rate = mdwc->core_clk_rate;
	if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
		core_clk_rate = mdwc->core_clk_rate_hs;
		dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
			core_clk_rate);
	}

	clk_set_rate(mdwc->core_clk, core_clk_rate);
	clk_prepare_enable(mdwc->core_clk);

	/* set Memory core: ON, Memory periphery: ON */
	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);

	clk_prepare_enable(mdwc->utmi_clk);
	if (mdwc->bus_aggr_clk)
		clk_prepare_enable(mdwc->bus_aggr_clk);

	/* Resume SS PHY, restoring the Type-C lane selection first */
	if (dwc->maximum_speed == USB_SPEED_SUPER &&
			mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
		mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
		if (mdwc->typec_orientation == ORIENTATION_CC1)
			mdwc->ss_phy->flags |= PHY_LANE_A;
		if (mdwc->typec_orientation == ORIENTATION_CC2)
			mdwc->ss_phy->flags |= PHY_LANE_B;
		usb_phy_set_suspend(mdwc->ss_phy, 0);
		mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
		mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
	}

	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
	/* Resume HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 0);

	/* Recover from controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		u32 tmp;

		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);

		dwc3_msm_power_collapse_por(mdwc);

		/* Get initial P3 status and enable IN_P3 event */
		tmp = dwc3_msm_read_reg_field(mdwc->base,
			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);

		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
	}

	atomic_set(&dwc->in_lpm, 0);

	/* enable power evt irq for IN P3 detection */
	enable_irq(mdwc->pwr_event_irq);

	/* Disable HSPHY auto suspend */
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
				~(DWC3_GUSB2PHYCFG_ENBLSLPM |
					DWC3_GUSB2PHYCFG_SUSPHY));

	/* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
	if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
		disable_irq_wake(mdwc->hs_phy_irq);
		disable_irq_nosync(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			disable_irq_wake(mdwc->ss_phy_irq);
			disable_irq_nosync(mdwc->ss_phy_irq);
		}
		mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 exited from low power mode\n");

	/* Enable core irq */
	if (dwc->irq)
		enable_irq(dwc->irq);

	/*
	 * Handle other power events that could not have been handled during
	 * Low Power Mode
	 */
	dwc3_pwr_event_handler(mdwc);

	/* Restart perf voting only if a QoS request is currently active */
	if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
		schedule_delayed_work(&mdwc->perf_vote_work,
			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));

	dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
	return 0;
}
2282
2283/**
2284 * dwc3_ext_event_notify - callback to handle events from external transceiver
2285 *
2286 * Returns 0 on success
2287 */
2288static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2289{
2290 /* Flush processing any pending events before handling new ones */
2291 flush_delayed_work(&mdwc->sm_work);
2292
2293 if (mdwc->id_state == DWC3_ID_FLOAT) {
2294 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2295 set_bit(ID, &mdwc->inputs);
2296 } else {
2297 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2298 clear_bit(ID, &mdwc->inputs);
2299 }
2300
2301 if (mdwc->vbus_active && !mdwc->in_restart) {
2302 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2303 set_bit(B_SESS_VLD, &mdwc->inputs);
2304 } else {
2305 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2306 clear_bit(B_SESS_VLD, &mdwc->inputs);
2307 }
2308
2309 if (mdwc->suspend) {
2310 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2311 set_bit(B_SUSPEND, &mdwc->inputs);
2312 } else {
2313 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2314 clear_bit(B_SUSPEND, &mdwc->inputs);
2315 }
2316
2317 schedule_delayed_work(&mdwc->sm_work, 0);
2318}
2319
2320static void dwc3_resume_work(struct work_struct *w)
2321{
2322 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002323 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002324
2325 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2326
2327 /*
2328 * exit LPM first to meet resume timeline from device side.
2329 * resume_pending flag would prevent calling
2330 * dwc3_msm_resume() in case we are here due to system
2331 * wide resume without usb cable connected. This flag is set
2332 * only in case of power event irq in lpm.
2333 */
2334 if (mdwc->resume_pending) {
2335 dwc3_msm_resume(mdwc);
2336 mdwc->resume_pending = false;
2337 }
2338
Mayank Rana08e41922017-03-02 15:25:48 -08002339 if (atomic_read(&mdwc->pm_suspended)) {
2340 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002341 /* let pm resume kick in resume work later */
2342 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002343 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002344 dwc3_ext_event_notify(mdwc);
2345}
2346
2347static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2348{
2349 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2350 u32 irq_stat, irq_clear = 0;
2351
2352 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2353 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2354
2355 /* Check for P3 events */
2356 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2357 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2358 /* Can't tell if entered or exit P3, so check LINKSTATE */
2359 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2360 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2361 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2362 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2363
2364 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2365 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2366 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2367 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2368 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2369 atomic_set(&mdwc->in_p3, 0);
2370 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2371 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2372 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2373 atomic_set(&mdwc->in_p3, 1);
2374 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2375 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2376 }
2377
2378 /* Clear L2 exit */
2379 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2380 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2381 irq_stat |= PWR_EVNT_LPM_OUT_L2_MASK;
2382 }
2383
2384 /* Handle exit from L1 events */
2385 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2386 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2387 __func__);
2388 if (usb_gadget_wakeup(&dwc->gadget))
2389 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2390 __func__);
2391 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2392 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2393 }
2394
2395 /* Unhandled events */
2396 if (irq_stat)
2397 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2398 __func__, irq_stat);
2399
2400 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2401}
2402
2403static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2404{
2405 struct dwc3_msm *mdwc = _mdwc;
2406 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2407
2408 dev_dbg(mdwc->dev, "%s\n", __func__);
2409
2410 if (atomic_read(&dwc->in_lpm))
2411 dwc3_resume_work(&mdwc->resume_work);
2412 else
2413 dwc3_pwr_event_handler(mdwc);
2414
Mayank Rana08e41922017-03-02 15:25:48 -08002415 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002416 return IRQ_HANDLED;
2417}
2418
2419static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2420{
2421 struct dwc3_msm *mdwc = data;
2422 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2423
2424 dwc->t_pwr_evt_irq = ktime_get();
2425 dev_dbg(mdwc->dev, "%s received\n", __func__);
2426 /*
2427 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to acertain
2428 * which interrupts have been triggered, as the clocks are disabled.
2429 * Resume controller by waking up pwr event irq thread.After re-enabling
2430 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2431 * all other power events.
2432 */
2433 if (atomic_read(&dwc->in_lpm)) {
2434 /* set this to call dwc3_msm_resume() */
2435 mdwc->resume_pending = true;
2436 return IRQ_WAKE_THREAD;
2437 }
2438
2439 dwc3_pwr_event_handler(mdwc);
2440 return IRQ_HANDLED;
2441}
2442
2443static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2444 unsigned long action, void *hcpu)
2445{
2446 uint32_t cpu = (uintptr_t)hcpu;
2447 struct dwc3_msm *mdwc =
2448 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2449
2450 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2451 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2452 cpu_to_affin, mdwc->irq_to_affin);
2453 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2454 }
2455
2456 return NOTIFY_OK;
2457}
2458
2459static void dwc3_otg_sm_work(struct work_struct *w);
2460
/*
 * dwc3_msm_get_clk_gdsc - acquire the GDSC regulator, clocks, and reset
 * handles, and program initial clock rates from device tree.
 *
 * Mandatory resources (xo, iface_clk, core_clk, core_reset, sleep_clk,
 * utmi_clk, qcom,core-clk-rate) fail the probe on error; optional ones
 * (GDSC, bus_aggr_clk, noc_aggr_clk, core-clk-rate-hs, cfg_ahb_clk) are
 * NULLed or defaulted so later code can test them.
 *
 * Return: 0 on success, negative errno on a missing mandatory resource.
 */
static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
{
	int ret;

	/* GDSC is optional: treat lookup failure as "no GDSC to manage" */
	mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
	if (IS_ERR(mdwc->dwc3_gdsc))
		mdwc->dwc3_gdsc = NULL;

	mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
	if (IS_ERR(mdwc->xo_clk)) {
		dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
								__func__);
		ret = PTR_ERR(mdwc->xo_clk);
		return ret;
	}
	clk_set_rate(mdwc->xo_clk, 19200000);

	mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
	if (IS_ERR(mdwc->iface_clk)) {
		dev_err(mdwc->dev, "failed to get iface_clk\n");
		ret = PTR_ERR(mdwc->iface_clk);
		return ret;
	}

	/*
	 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
	 * run at 125Mhz in SSUSB mode and >60MHZ for HSUSB mode.
	 * On newer platform it can run at 150MHz as well.
	 */
	mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
	if (IS_ERR(mdwc->core_clk)) {
		dev_err(mdwc->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdwc->core_clk);
		return ret;
	}

	mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
	if (IS_ERR(mdwc->core_reset)) {
		dev_err(mdwc->dev, "failed to get core_reset\n");
		return PTR_ERR(mdwc->core_reset);
	}

	/* DT must supply the nominal core clock rate */
	if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
				(u32 *)&mdwc->core_clk_rate)) {
		dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
		return -EINVAL;
	}

	/* Snap the requested rate to one the clock tree can provide */
	mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
							mdwc->core_clk_rate);
	dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
						mdwc->core_clk_rate);
	ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
	if (ret)
		dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);

	/* Optional lower rate for HS-only operation; default to nominal */
	if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
				(u32 *)&mdwc->core_clk_rate_hs)) {
		dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
		mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
	}

	mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
	if (IS_ERR(mdwc->sleep_clk)) {
		dev_err(mdwc->dev, "failed to get sleep_clk\n");
		ret = PTR_ERR(mdwc->sleep_clk);
		return ret;
	}

	clk_set_rate(mdwc->sleep_clk, 32000);
	mdwc->utmi_clk_rate = 19200000;
	mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
	if (IS_ERR(mdwc->utmi_clk)) {
		dev_err(mdwc->dev, "failed to get utmi_clk\n");
		ret = PTR_ERR(mdwc->utmi_clk);
		return ret;
	}

	clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
	/* Aggregator clocks are optional; NULL means "not on this SoC" */
	mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
	if (IS_ERR(mdwc->bus_aggr_clk))
		mdwc->bus_aggr_clk = NULL;

	mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
	if (IS_ERR(mdwc->noc_aggr_clk))
		mdwc->noc_aggr_clk = NULL;

	/* cfg_ahb_clk is mandatory only when named in DT clock-names */
	if (of_property_match_string(mdwc->dev->of_node,
				"clock-names", "cfg_ahb_clk") >= 0) {
		mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
		if (IS_ERR(mdwc->cfg_ahb_clk)) {
			ret = PTR_ERR(mdwc->cfg_ahb_clk);
			mdwc->cfg_ahb_clk = NULL;
			if (ret != -EPROBE_DEFER)
				dev_err(mdwc->dev,
					"failed to get cfg_ahb_clk ret %d\n",
					ret);
			return ret;
		}
	}

	return 0;
}
2564
2565static int dwc3_msm_id_notifier(struct notifier_block *nb,
2566 unsigned long event, void *ptr)
2567{
2568 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002569 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002570 struct extcon_dev *edev = ptr;
2571 enum dwc3_id_state id;
2572 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002573 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002574
2575 if (!edev) {
2576 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2577 goto done;
2578 }
2579
2580 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2581
2582 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2583
2584 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2585 if (cc_state < 0)
2586 mdwc->typec_orientation = ORIENTATION_NONE;
2587 else
2588 mdwc->typec_orientation =
2589 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2590
Mayank Rana08e41922017-03-02 15:25:48 -08002591 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
Hemant Kumarde1df692016-04-26 19:36:48 -07002592
2593 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
2594 dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
Vamsi Krishna Samavedam86ed20b2017-01-31 13:55:38 -08002595 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2596 dwc->maximum_speed = dwc->max_hw_supp_speed;
Hemant Kumarde1df692016-04-26 19:36:48 -07002597
Mayank Rana511f3b22016-08-02 12:00:11 -07002598 if (mdwc->id_state != id) {
2599 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002600 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002601 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2602 }
2603
2604done:
2605 return NOTIFY_DONE;
2606}
2607
2608static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2609 unsigned long event, void *ptr)
2610{
2611 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2612 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2613 struct extcon_dev *edev = ptr;
2614 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002615 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002616
2617 if (!edev) {
2618 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2619 goto done;
2620 }
2621
2622 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2623
2624 if (mdwc->vbus_active == event)
2625 return NOTIFY_DONE;
2626
2627 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2628 if (cc_state < 0)
2629 mdwc->typec_orientation = ORIENTATION_NONE;
2630 else
2631 mdwc->typec_orientation =
2632 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2633
Mayank Rana08e41922017-03-02 15:25:48 -08002634 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
Hemant Kumarde1df692016-04-26 19:36:48 -07002635
2636 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
2637 dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
Vamsi Krishna Samavedam86ed20b2017-01-31 13:55:38 -08002638 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2639 dwc->maximum_speed = dwc->max_hw_supp_speed;
Hemant Kumarde1df692016-04-26 19:36:48 -07002640
Mayank Rana511f3b22016-08-02 12:00:11 -07002641 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002642 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002643 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002644done:
2645 return NOTIFY_DONE;
2646}
Mayank Rana51958172017-02-28 14:49:21 -08002647/*
2648 * Handle EUD based soft detach/attach event, and force USB high speed mode
2649 * functionality on receiving soft attach event.
2650 *
2651 * @nb - notifier handler
2652 * @event - event information i.e. soft detach/attach event
2653 * @ptr - extcon_dev pointer
2654 *
2655 * @return int - NOTIFY_DONE always due to EUD
2656 */
2657static int dwc3_msm_eud_notifier(struct notifier_block *nb,
2658 unsigned long event, void *ptr)
2659{
2660 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
2661 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2662 struct extcon_dev *edev = ptr;
2663
2664 if (!edev) {
2665 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2666 goto done;
2667 }
2668
2669 dbg_event(0xFF, "EUD_NB", event);
2670 dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
2671 if (mdwc->vbus_active == event)
2672 return NOTIFY_DONE;
2673
2674 /* Force USB High-Speed enumeration Only */
2675 dwc->maximum_speed = USB_SPEED_HIGH;
2676 dbg_event(0xFF, "Speed", dwc->maximum_speed);
2677 mdwc->vbus_active = event;
2678 if (dwc->is_drd && !mdwc->in_restart)
2679 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2680done:
2681 return NOTIFY_DONE;
2682}
Mayank Rana511f3b22016-08-02 12:00:11 -07002683
/*
 * dwc3_msm_extcon_register - wire up the extcon notifiers listed in the
 * "extcon" DT property.
 *
 * Phandle layout: [0] vbus (mandatory), [1] USB ID (optional),
 * [2] EUD attach/detach (optional). Note that when a later phandle is
 * absent, 'edev' still holds the previous (non-error) device, so the ID
 * and/or EUD notifiers are registered on that earlier extcon device —
 * a single extcon provider can thus serve multiple cable types.
 *
 * Return: 0 on success (including the no-"extcon"-property case), negative
 * errno with previously registered notifiers unwound on failure.
 */
static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
{
	struct device_node *node = mdwc->dev->of_node;
	struct extcon_dev *edev;
	int ret = 0;

	/* No extcon property at all: nothing to register */
	if (!of_property_read_bool(node, "extcon"))
		return 0;

	/* Use first phandle (mandatory) for USB vbus status notification */
	edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
		return PTR_ERR(edev);

	if (!IS_ERR(edev)) {
		mdwc->extcon_vbus = edev;
		mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB,
				&mdwc->vbus_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB\n");
			return ret;
		}
	}

	/* Use second phandle (optional) for USB ID status notification */
	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
		edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
			ret = PTR_ERR(edev);
			goto err;
		}
	}

	/* Falls back to the vbus extcon device when phandle 1 is absent */
	if (!IS_ERR(edev)) {
		mdwc->extcon_id = edev;
		mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
				&mdwc->id_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
			goto err;
		}
	}

	/* Use third phandle (optional) for EUD based detach/attach events */
	if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
		edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
			ret = PTR_ERR(edev);
			goto err;
		}
	}

	if (!IS_ERR(edev)) {
		mdwc->extcon_eud = edev;
		mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB,
				&mdwc->eud_event_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for EUD-USB\n");
			goto err1;
		}
	}

	return 0;
/* Unwind in reverse registration order */
err1:
	if (mdwc->extcon_id)
		extcon_unregister_notifier(mdwc->extcon_id, EXTCON_USB_HOST,
				&mdwc->id_nb);
err:
	if (mdwc->extcon_vbus)
		extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
				&mdwc->vbus_nb);
	return ret;
}
2760
2761static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2762 char *buf)
2763{
2764 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2765
2766 if (mdwc->vbus_active)
2767 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2768 if (mdwc->id_state == DWC3_ID_GROUND)
2769 return snprintf(buf, PAGE_SIZE, "host\n");
2770
2771 return snprintf(buf, PAGE_SIZE, "none\n");
2772}
2773
2774static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2775 const char *buf, size_t count)
2776{
2777 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2778
2779 if (sysfs_streq(buf, "peripheral")) {
2780 mdwc->vbus_active = true;
2781 mdwc->id_state = DWC3_ID_FLOAT;
2782 } else if (sysfs_streq(buf, "host")) {
2783 mdwc->vbus_active = false;
2784 mdwc->id_state = DWC3_ID_GROUND;
2785 } else {
2786 mdwc->vbus_active = false;
2787 mdwc->id_state = DWC3_ID_FLOAT;
2788 }
2789
2790 dwc3_ext_event_notify(mdwc);
2791
2792 return count;
2793}
2794
2795static DEVICE_ATTR_RW(mode);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302796static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07002797
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08002798static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
2799 char *buf)
2800{
2801 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2802 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2803
2804 return snprintf(buf, PAGE_SIZE, "%s\n",
2805 usb_speed_string(dwc->max_hw_supp_speed));
2806}
2807
2808static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
2809 const char *buf, size_t count)
2810{
2811 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2812 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2813 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
2814
2815 if (sysfs_streq(buf, "high"))
2816 req_speed = USB_SPEED_HIGH;
2817 else if (sysfs_streq(buf, "super"))
2818 req_speed = USB_SPEED_SUPER;
2819
2820 if (req_speed != USB_SPEED_UNKNOWN &&
2821 req_speed != dwc->max_hw_supp_speed) {
2822 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
2823 schedule_work(&mdwc->restart_usb_work);
2824 }
2825
2826 return count;
2827}
2828static DEVICE_ATTR_RW(speed);
2829
Mayank Rana511f3b22016-08-02 12:00:11 -07002830static int dwc3_msm_probe(struct platform_device *pdev)
2831{
2832 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2833 struct device *dev = &pdev->dev;
2834 struct dwc3_msm *mdwc;
2835 struct dwc3 *dwc;
2836 struct resource *res;
2837 void __iomem *tcsr;
2838 bool host_mode;
2839 int ret = 0;
2840 int ext_hub_reset_gpio;
2841 u32 val;
2842
2843 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2844 if (!mdwc)
2845 return -ENOMEM;
2846
2847 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2848 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2849 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2850 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2851 return -EOPNOTSUPP;
2852 }
2853 }
2854
2855 platform_set_drvdata(pdev, mdwc);
2856 mdwc->dev = &pdev->dev;
2857
2858 INIT_LIST_HEAD(&mdwc->req_complete_list);
2859 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2860 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2861 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07002862 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002863 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302864 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002865
2866 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2867 if (!mdwc->dwc3_wq) {
2868 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2869 return -ENOMEM;
2870 }
2871
2872 /* Get all clks and gdsc reference */
2873 ret = dwc3_msm_get_clk_gdsc(mdwc);
2874 if (ret) {
2875 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2876 return ret;
2877 }
2878
2879 mdwc->id_state = DWC3_ID_FLOAT;
2880 set_bit(ID, &mdwc->inputs);
2881
2882 mdwc->charging_disabled = of_property_read_bool(node,
2883 "qcom,charging-disabled");
2884
2885 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2886 &mdwc->lpm_to_suspend_delay);
2887 if (ret) {
2888 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2889 mdwc->lpm_to_suspend_delay = 0;
2890 }
2891
2892 /*
2893 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2894 * DP and DM linestate transitions during low power mode.
2895 */
2896 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2897 if (mdwc->hs_phy_irq < 0) {
2898 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2899 ret = -EINVAL;
2900 goto err;
2901 } else {
2902 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2903 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2904 msm_dwc3_pwr_irq,
2905 msm_dwc3_pwr_irq_thread,
2906 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2907 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2908 if (ret) {
2909 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2910 ret);
2911 goto err;
2912 }
2913 }
2914
2915 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
2916 if (mdwc->ss_phy_irq < 0) {
2917 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
2918 } else {
2919 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
2920 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
2921 msm_dwc3_pwr_irq,
2922 msm_dwc3_pwr_irq_thread,
2923 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2924 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
2925 if (ret) {
2926 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
2927 ret);
2928 goto err;
2929 }
2930 }
2931
2932 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
2933 if (mdwc->pwr_event_irq < 0) {
2934 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
2935 ret = -EINVAL;
2936 goto err;
2937 } else {
2938 /* will be enabled in dwc3_msm_resume() */
2939 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
2940 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
2941 msm_dwc3_pwr_irq,
2942 msm_dwc3_pwr_irq_thread,
2943 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
2944 "msm_dwc3", mdwc);
2945 if (ret) {
2946 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
2947 ret);
2948 goto err;
2949 }
2950 }
2951
2952 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
2953 if (!res) {
2954 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
2955 } else {
2956 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
2957 resource_size(res));
2958 if (IS_ERR_OR_NULL(tcsr)) {
2959 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
2960 } else {
2961 /* Enable USB3 on the primary USB port. */
2962 writel_relaxed(0x1, tcsr);
2963 /*
2964 * Ensure that TCSR write is completed before
2965 * USB registers initialization.
2966 */
2967 mb();
2968 }
2969 }
2970
2971 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
2972 if (!res) {
2973 dev_err(&pdev->dev, "missing memory base resource\n");
2974 ret = -ENODEV;
2975 goto err;
2976 }
2977
2978 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
2979 resource_size(res));
2980 if (!mdwc->base) {
2981 dev_err(&pdev->dev, "ioremap failed\n");
2982 ret = -ENODEV;
2983 goto err;
2984 }
2985
2986 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2987 "ahb2phy_base");
2988 if (res) {
2989 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
2990 res->start, resource_size(res));
2991 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
2992 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
2993 mdwc->ahb2phy_base = NULL;
2994 } else {
2995 /*
2996 * On some targets cfg_ahb_clk depends upon usb gdsc
2997 * regulator. If cfg_ahb_clk is enabled without
2998 * turning on usb gdsc regulator clk is stuck off.
2999 */
3000 dwc3_msm_config_gdsc(mdwc, 1);
3001 clk_prepare_enable(mdwc->cfg_ahb_clk);
3002 /* Configure AHB2PHY for one wait state read/write*/
3003 val = readl_relaxed(mdwc->ahb2phy_base +
3004 PERIPH_SS_AHB2PHY_TOP_CFG);
3005 if (val != ONE_READ_WRITE_WAIT) {
3006 writel_relaxed(ONE_READ_WRITE_WAIT,
3007 mdwc->ahb2phy_base +
3008 PERIPH_SS_AHB2PHY_TOP_CFG);
3009 /* complete above write before using USB PHY */
3010 mb();
3011 }
3012 clk_disable_unprepare(mdwc->cfg_ahb_clk);
3013 dwc3_msm_config_gdsc(mdwc, 0);
3014 }
3015 }
3016
3017 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
3018 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
3019 if (IS_ERR(mdwc->dbm)) {
3020 dev_err(&pdev->dev, "unable to get dbm device\n");
3021 ret = -EPROBE_DEFER;
3022 goto err;
3023 }
3024 /*
3025 * Add power event if the dbm indicates coming out of L1
3026 * by interrupt
3027 */
3028 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
3029 if (!mdwc->pwr_event_irq) {
3030 dev_err(&pdev->dev,
3031 "need pwr_event_irq exiting L1\n");
3032 ret = -EINVAL;
3033 goto err;
3034 }
3035 }
3036 }
3037
3038 ext_hub_reset_gpio = of_get_named_gpio(node,
3039 "qcom,ext-hub-reset-gpio", 0);
3040
3041 if (gpio_is_valid(ext_hub_reset_gpio)
3042 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
3043 "qcom,ext-hub-reset-gpio"))) {
3044 /* reset external hub */
3045 gpio_direction_output(ext_hub_reset_gpio, 1);
3046 /*
3047 * Hub reset should be asserted for minimum 5microsec
3048 * before deasserting.
3049 */
3050 usleep_range(5, 1000);
3051 gpio_direction_output(ext_hub_reset_gpio, 0);
3052 }
3053
3054 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
3055 &mdwc->tx_fifo_size))
3056 dev_err(&pdev->dev,
3057 "unable to read platform data tx fifo size\n");
3058
3059 mdwc->disable_host_mode_pm = of_property_read_bool(node,
3060 "qcom,disable-host-mode-pm");
3061
3062 dwc3_set_notifier(&dwc3_msm_notify_event);
3063
3064 /* Assumes dwc3 is the first DT child of dwc3-msm */
3065 dwc3_node = of_get_next_available_child(node, NULL);
3066 if (!dwc3_node) {
3067 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3068 ret = -ENODEV;
3069 goto err;
3070 }
3071
3072 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3073 if (ret) {
3074 dev_err(&pdev->dev,
3075 "failed to add create dwc3 core\n");
3076 of_node_put(dwc3_node);
3077 goto err;
3078 }
3079
3080 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3081 of_node_put(dwc3_node);
3082 if (!mdwc->dwc3) {
3083 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
3084 goto put_dwc3;
3085 }
3086
3087 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3088 "usb-phy", 0);
3089 if (IS_ERR(mdwc->hs_phy)) {
3090 dev_err(&pdev->dev, "unable to get hsphy device\n");
3091 ret = PTR_ERR(mdwc->hs_phy);
3092 goto put_dwc3;
3093 }
3094 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3095 "usb-phy", 1);
3096 if (IS_ERR(mdwc->ss_phy)) {
3097 dev_err(&pdev->dev, "unable to get ssphy device\n");
3098 ret = PTR_ERR(mdwc->ss_phy);
3099 goto put_dwc3;
3100 }
3101
3102 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3103 if (mdwc->bus_scale_table) {
3104 mdwc->bus_perf_client =
3105 msm_bus_scale_register_client(mdwc->bus_scale_table);
3106 }
3107
3108 dwc = platform_get_drvdata(mdwc->dwc3);
3109 if (!dwc) {
3110 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
3111 goto put_dwc3;
3112 }
3113
3114 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3115 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3116
3117 if (cpu_to_affin)
3118 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3119
Mayank Ranaf4918d32016-12-15 13:35:55 -08003120 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3121 &mdwc->num_gsi_event_buffers);
3122
Mayank Rana511f3b22016-08-02 12:00:11 -07003123 /*
3124 * Clocks and regulators will not be turned on until the first time
3125 * runtime PM resume is called. This is to allow for booting up with
3126 * charger already connected so as not to disturb PHY line states.
3127 */
3128 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3129 atomic_set(&dwc->in_lpm, 1);
3130 pm_runtime_set_suspended(mdwc->dev);
3131 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3132 pm_runtime_use_autosuspend(mdwc->dev);
3133 pm_runtime_enable(mdwc->dev);
3134 device_init_wakeup(mdwc->dev, 1);
3135
3136 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3137 pm_runtime_get_noresume(mdwc->dev);
3138
3139 ret = dwc3_msm_extcon_register(mdwc);
3140 if (ret)
3141 goto put_dwc3;
3142
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303143 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3144 &mdwc->pm_qos_latency);
3145 if (ret) {
3146 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3147 mdwc->pm_qos_latency = 0;
3148 }
3149
Mayank Rana511f3b22016-08-02 12:00:11 -07003150 /* Update initial VBUS/ID state from extcon */
3151 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
3152 EXTCON_USB))
3153 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
3154 if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
3155 EXTCON_USB_HOST))
3156 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
3157
3158 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003159 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003160
3161 schedule_delayed_work(&mdwc->sm_work, 0);
3162
3163 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3164 if (!dwc->is_drd && host_mode) {
3165 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3166 mdwc->id_state = DWC3_ID_GROUND;
3167 dwc3_ext_event_notify(mdwc);
3168 }
3169
3170 return 0;
3171
3172put_dwc3:
3173 platform_device_put(mdwc->dwc3);
3174 if (mdwc->bus_perf_client)
3175 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3176err:
3177 return ret;
3178}
3179
/*
 * device_for_each_child() callback used at driver removal: unregister
 * every child device (always returns 0 so iteration visits all children).
 */
static int dwc3_msm_remove_children(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}
3185
3186static int dwc3_msm_remove(struct platform_device *pdev)
3187{
3188 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003189 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003190 int ret_pm;
3191
3192 device_remove_file(&pdev->dev, &dev_attr_mode);
3193
3194 if (cpu_to_affin)
3195 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3196
3197 /*
3198 * In case of system suspend, pm_runtime_get_sync fails.
3199 * Hence turn ON the clocks manually.
3200 */
3201 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003202 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003203 if (ret_pm < 0) {
3204 dev_err(mdwc->dev,
3205 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303206 if (mdwc->noc_aggr_clk)
3207 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003208 clk_prepare_enable(mdwc->utmi_clk);
3209 clk_prepare_enable(mdwc->core_clk);
3210 clk_prepare_enable(mdwc->iface_clk);
3211 clk_prepare_enable(mdwc->sleep_clk);
3212 if (mdwc->bus_aggr_clk)
3213 clk_prepare_enable(mdwc->bus_aggr_clk);
3214 clk_prepare_enable(mdwc->xo_clk);
3215 }
3216
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303217 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003218 cancel_delayed_work_sync(&mdwc->sm_work);
3219
3220 if (mdwc->hs_phy)
3221 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3222 platform_device_put(mdwc->dwc3);
3223 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
3224
Mayank Rana08e41922017-03-02 15:25:48 -08003225 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003226 pm_runtime_disable(mdwc->dev);
3227 pm_runtime_barrier(mdwc->dev);
3228 pm_runtime_put_sync(mdwc->dev);
3229 pm_runtime_set_suspended(mdwc->dev);
3230 device_wakeup_disable(mdwc->dev);
3231
3232 if (mdwc->bus_perf_client)
3233 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3234
3235 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3236 regulator_disable(mdwc->vbus_reg);
3237
3238 disable_irq(mdwc->hs_phy_irq);
3239 if (mdwc->ss_phy_irq)
3240 disable_irq(mdwc->ss_phy_irq);
3241 disable_irq(mdwc->pwr_event_irq);
3242
3243 clk_disable_unprepare(mdwc->utmi_clk);
3244 clk_set_rate(mdwc->core_clk, 19200000);
3245 clk_disable_unprepare(mdwc->core_clk);
3246 clk_disable_unprepare(mdwc->iface_clk);
3247 clk_disable_unprepare(mdwc->sleep_clk);
3248 clk_disable_unprepare(mdwc->xo_clk);
3249 clk_put(mdwc->xo_clk);
3250
3251 dwc3_msm_config_gdsc(mdwc, 0);
3252
3253 return 0;
3254}
3255
/*
 * USB device add/remove notifier used while in host mode.
 *
 * For devices attached directly to the root hub it (a) drops the core
 * clock to the HS rate when the SS root-hub port is idle, restoring the
 * full rate on removal, and (b) reports the configuration's bMaxPower to
 * the "usb" power supply so the PMIC can size its boost regulator.
 */
static int dwc3_msm_host_notifier(struct notifier_block *nb,
	unsigned long event, void *ptr)
{
	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct usb_device *udev = ptr;
	union power_supply_propval pval;
	unsigned int max_power;

	if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
		return NOTIFY_DONE;

	/* lazily resolve the "usb" power supply; bail quietly if absent */
	if (!mdwc->usb_psy) {
		mdwc->usb_psy = power_supply_get_by_name("usb");
		if (!mdwc->usb_psy)
			return NOTIFY_DONE;
	}

	/*
	 * For direct-attach devices, new udev is direct child of root hub
	 * i.e. dwc -> xhci -> root_hub -> udev
	 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
	 */
	if (udev->parent && !udev->parent->parent &&
			udev->dev.parent->parent == &dwc->xhci->dev) {
		if (event == USB_DEVICE_ADD && udev->actconfig) {
			if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
				/*
				 * Core clock rate can be reduced only if root
				 * hub SS port is not enabled/connected.
				 */
				clk_set_rate(mdwc->core_clk,
				mdwc->core_clk_rate_hs);
				dev_dbg(mdwc->dev,
					"set hs core clk rate %ld\n",
					mdwc->core_clk_rate_hs);
				mdwc->max_rh_port_speed = USB_SPEED_HIGH;
			} else {
				mdwc->max_rh_port_speed = USB_SPEED_SUPER;
			}

			/* bMaxPower units: 8 mA for SS+, 2 mA below that */
			if (udev->speed >= USB_SPEED_SUPER)
				max_power = udev->actconfig->desc.bMaxPower * 8;
			else
				max_power = udev->actconfig->desc.bMaxPower * 2;
			dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
					dev_name(&udev->dev), max_power);

			/* inform PMIC of max power so it can optimize boost */
			pval.intval = max_power * 1000;
			power_supply_set_property(mdwc->usb_psy,
					POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
		} else {
			pval.intval = 0;
			power_supply_set_property(mdwc->usb_psy,
					POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);

			/* set rate back to default core clk rate */
			clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
			dev_dbg(mdwc->dev, "set core clk rate %ld\n",
				mdwc->core_clk_rate);
			mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
		}
	}

	return NOTIFY_DONE;
}
3323
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303324static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
3325{
3326 static bool curr_perf_mode;
3327 int latency = mdwc->pm_qos_latency;
3328
3329 if ((curr_perf_mode == perf_mode) || !latency)
3330 return;
3331
3332 if (perf_mode)
3333 pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
3334 else
3335 pm_qos_update_request(&mdwc->pm_qos_req_dma,
3336 PM_QOS_DEFAULT_VALUE);
3337
3338 curr_perf_mode = perf_mode;
3339 pr_debug("%s: latency updated to: %d\n", __func__,
3340 perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
3341}
3342
3343static void msm_dwc3_perf_vote_work(struct work_struct *w)
3344{
3345 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
3346 perf_vote_work.work);
3347 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3348 static unsigned long last_irq_cnt;
3349 bool in_perf_mode = false;
3350
3351 if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
3352 in_perf_mode = true;
3353
3354 pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
3355 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));
3356
3357 last_irq_cnt = dwc->irq_cnt;
3358 msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
3359 schedule_delayed_work(&mdwc->perf_vote_work,
3360 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
3361}
3362
#define VBUS_REG_CHECK_DELAY	(msecs_to_jiffies(1000))

/**
 * dwc3_otg_start_host - helper function for starting/stopping the host
 * controller driver.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @on: start / stop the host controller driver.
 *
 * On start: claims the PHYs for host mode, powers VBUS, switches the core
 * to host role, adds the xHCI platform device and arms the PM QoS perf
 * sampler.  On stop: the reverse, plus a block reset so peripheral mode
 * works afterwards.  Runtime PM get/put ordering here is deliberate —
 * do not reorder.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	int ret = 0;

	if (!dwc->xhci)
		return -EINVAL;

	/*
	 * The vbus_reg pointer could have multiple values
	 * NULL: regulator_get() hasn't been called, or was previously deferred
	 * IS_ERR: regulator could not be obtained, so skip using it
	 * Valid pointer otherwise
	 */
	if (!mdwc->vbus_reg) {
		mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
				"vbus_dwc3");
		if (IS_ERR(mdwc->vbus_reg) &&
				PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
			/* regulators may not be ready, so retry again later */
			mdwc->vbus_reg = NULL;
			return -EPROBE_DEFER;
		}
	}

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);

		/* mark PHYs as host-owned before powering up */
		mdwc->hs_phy->flags |= PHY_HOST_MODE;
		if (dwc->maximum_speed == USB_SPEED_SUPER)
			mdwc->ss_phy->flags |= PHY_HOST_MODE;

		pm_runtime_get_sync(mdwc->dev);
		dbg_event(0xFF, "StrtHost gync",
			atomic_read(&mdwc->dev->power.usage_count));
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		/* ret stays 0 when the regulator is absent (IS_ERR) */
		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_enable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to enable vbus_reg\n");
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "vregerr psync",
				atomic_read(&mdwc->dev->power.usage_count));
			return ret;
		}

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);

		/* watch for device add/remove to scale clk + boost votes */
		mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
		usb_register_notify(&mdwc->host_nb);

		/*
		 * FIXME If micro A cable is disconnected during system suspend,
		 * xhci platform device will be removed before runtime pm is
		 * enabled for xhci device. Due to this, disable_depth becomes
		 * greater than one and runtimepm is not enabled for next microA
		 * connect. Fix this by calling pm_runtime_init for xhci device.
		 */
		pm_runtime_init(&dwc->xhci->dev);
		ret = platform_device_add(dwc->xhci);
		if (ret) {
			dev_err(mdwc->dev,
				"%s: failed to add XHCI pdev ret=%d\n",
				__func__, ret);
			/* unwind in reverse order of the setup above */
			if (!IS_ERR(mdwc->vbus_reg))
				regulator_disable(mdwc->vbus_reg);
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "pdeverr psync",
				atomic_read(&mdwc->dev->power.usage_count));
			usb_unregister_notify(&mdwc->host_nb);
			return ret;
		}

		/*
		 * In some cases it is observed that USB PHY is not going into
		 * suspend with host mode suspend functionality. Hence disable
		 * XHCI's runtime PM here if disable_host_mode_pm is set.
		 */
		if (mdwc->disable_host_mode_pm)
			pm_runtime_disable(&dwc->xhci->dev);

		mdwc->in_host_mode = true;
		dwc3_usb3_phy_suspend(dwc, true);

		/* xHCI should have incremented child count as necessary */
		dbg_event(0xFF, "StrtHost psync",
			atomic_read(&mdwc->dev->power.usage_count));
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
#ifdef CONFIG_SMP
		/* pin the QoS vote to the CPU servicing the dwc3 IRQ */
		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
		mdwc->pm_qos_req_dma.irq = dwc->irq;
#endif
		pm_qos_add_request(&mdwc->pm_qos_req_dma,
				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
		/* start in perf mode for better performance initially */
		msm_dwc3_perf_vote_update(mdwc, true);
		schedule_delayed_work(&mdwc->perf_vote_work,
				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
	} else {
		dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);

		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_disable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to disable vbus_reg\n");
			return ret;
		}

		cancel_delayed_work_sync(&mdwc->perf_vote_work);
		msm_dwc3_perf_vote_update(mdwc, false);
		pm_qos_remove_request(&mdwc->pm_qos_req_dma);

		pm_runtime_get_sync(mdwc->dev);
		dbg_event(0xFF, "StopHost gsync",
			atomic_read(&mdwc->dev->power.usage_count));
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
		mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
		platform_device_del(dwc->xhci);
		usb_unregister_notify(&mdwc->host_nb);

		/*
		 * Perform USB hardware RESET (both core reset and DBM reset)
		 * when moving from host to peripheral. This is required for
		 * peripheral mode to work.
		 */
		dwc3_msm_block_reset(mdwc, true);

		dwc3_usb3_phy_suspend(dwc, false);
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);

		mdwc->in_host_mode = false;

		/* re-init core and OTG registers as block reset clears these */
		dwc3_post_host_reset_core_init(dwc);
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
		dbg_event(0xFF, "StopHost psync",
			atomic_read(&mdwc->dev->power.usage_count));
	}

	return 0;
}
3522
3523static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3524{
3525 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3526
3527 /* Update OTG VBUS Valid from HSPHY to controller */
3528 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3529 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3530 UTMI_OTG_VBUS_VALID,
3531 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3532
3533 /* Update only if Super Speed is supported */
3534 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3535 /* Update VBUS Valid from SSPHY to controller */
3536 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3537 LANE0_PWR_PRESENT,
3538 vbus_present ? LANE0_PWR_PRESENT : 0);
3539 }
3540}
3541
/**
 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @on: Turn ON/OFF the gadget.
 *
 * Holds a runtime PM reference for the duration of the role switch only;
 * the long-lived reference for an active cable is taken by the OTG state
 * machine, not here.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	pm_runtime_get_sync(mdwc->dev);
	dbg_event(0xFF, "StrtGdgt gsync",
		atomic_read(&mdwc->dev->power.usage_count));

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
					__func__, dwc->gadget.name);

		/* present VBUS to the core before notifying the PHYs */
		dwc3_override_vbus_status(mdwc, true);
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);

		/*
		 * Core reset is not required during start peripheral. Only
		 * DBM reset is required, hence perform only DBM reset here.
		 */
		dwc3_msm_block_reset(mdwc, false);

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		usb_gadget_vbus_connect(&dwc->gadget);
#ifdef CONFIG_SMP
		/* pin the QoS vote to the CPU servicing the dwc3 IRQ */
		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
		mdwc->pm_qos_req_dma.irq = dwc->irq;
#endif
		pm_qos_add_request(&mdwc->pm_qos_req_dma,
				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
		/* start in perf mode for better performance initially */
		msm_dwc3_perf_vote_update(mdwc, true);
		schedule_delayed_work(&mdwc->perf_vote_work,
				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
	} else {
		dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
					__func__, dwc->gadget.name);
		cancel_delayed_work_sync(&mdwc->perf_vote_work);
		msm_dwc3_perf_vote_update(mdwc, false);
		pm_qos_remove_request(&mdwc->pm_qos_req_dma);

		/* tear down in reverse order of the bring-up above */
		usb_gadget_vbus_disconnect(&dwc->gadget);
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
		dwc3_override_vbus_status(mdwc, false);
		dwc3_usb3_phy_suspend(dwc, false);
	}

	pm_runtime_put_sync(mdwc->dev);
	dbg_event(0xFF, "StopGdgt psync",
		atomic_read(&mdwc->dev->power.usage_count));

	return 0;
}
3604
/*
 * Report the bus-negotiated current budget (@mA, milliamps) to the "usb"
 * power supply so charging can track it.
 *
 * No-op (returns 0) when charging is disabled, the value is unchanged, or
 * the active supply type is not USB SDP.  Returns -ENODEV when the "usb"
 * power supply is unavailable, or the power_supply_set_property() error.
 */
static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
{
	union power_supply_propval pval = {0};
	int ret;

	if (mdwc->charging_disabled)
		return 0;

	/* avoid redundant power-supply writes for the same budget */
	if (mdwc->max_power == mA)
		return 0;

	/* lazily resolve the "usb" power supply on first use */
	if (!mdwc->usb_psy) {
		mdwc->usb_psy = power_supply_get_by_name("usb");
		if (!mdwc->usb_psy) {
			dev_warn(mdwc->dev, "Could not get usb power_supply\n");
			return -ENODEV;
		}
	}

	/* only a standard downstream port obeys the enumerated limit */
	power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
	if (pval.intval != POWER_SUPPLY_TYPE_USB)
		return 0;

	dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);

	/* Set max current limit in uA */
	pval.intval = 1000 * mA;
	ret = power_supply_set_property(mdwc->usb_psy,
			POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
	if (ret) {
		dev_dbg(mdwc->dev, "power supply error when setting property\n");
		return ret;
	}

	mdwc->max_power = mA;
	return 0;
}
3642
3643
/**
 * dwc3_otg_sm_work - workqueue function.
 *
 * @w: Pointer to the dwc3 otg workqueue
 *
 * Drives the OTG state machine: reads the ID / B_SESS_VLD / B_SUSPEND
 * input bits and moves between B-idle/peripheral/suspend and
 * A-idle/host states, starting or stopping the peripheral/host roles
 * and balancing pm_runtime usage counts along the way.
 *
 * NOTE: After any change in otg_state, we must reschedule the state machine
 * (done here by setting 'work' so the work item is re-queued).
 */
static void dwc3_otg_sm_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
	struct dwc3 *dwc = NULL;
	bool work = 0;
	int ret = 0;
	unsigned long delay = 0;
	const char *state;

	/* core device may not have probed yet; bail out if so */
	if (mdwc->dwc3)
		dwc = platform_get_drvdata(mdwc->dwc3);

	if (!dwc) {
		dev_err(mdwc->dev, "dwc is NULL.\n");
		return;
	}

	state = usb_otg_state_string(mdwc->otg_state);
	dev_dbg(mdwc->dev, "%s state\n", state);
	dbg_event(0xFF, state, 0);

	/* Check OTG state */
	switch (mdwc->otg_state) {
	case OTG_STATE_UNDEFINED:
		/* Do nothing if no cable connected */
		if (test_bit(ID, &mdwc->inputs) &&
				!test_bit(B_SESS_VLD, &mdwc->inputs))
			break;

		dbg_event(0xFF, "Exit UNDEF", 0);
		mdwc->otg_state = OTG_STATE_B_IDLE;
		/* fall-through */
	case OTG_STATE_B_IDLE:
		if (!test_bit(ID, &mdwc->inputs)) {
			/* ID grounded: switch to A-device path */
			dev_dbg(mdwc->dev, "!id\n");
			mdwc->otg_state = OTG_STATE_A_IDLE;
			work = 1;
		} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "b_sess_vld\n");
			/*
			 * Increment pm usage count upon cable connect. Count
			 * is decremented in OTG_STATE_B_PERIPHERAL state on
			 * cable disconnect or in bus suspend.
			 */
			pm_runtime_get_sync(mdwc->dev);
			dbg_event(0xFF, "BIDLE gsync",
				atomic_read(&mdwc->dev->power.usage_count));
			dwc3_otg_start_peripheral(mdwc, 1);
			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
			work = 1;
		} else {
			/* no cable: drop charging current request to 0 */
			dwc3_msm_gadget_vbus_draw(mdwc, 0);
			dev_dbg(mdwc->dev, "Cable disconnected\n");
		}
		break;

	case OTG_STATE_B_PERIPHERAL:
		if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
				!test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "!id || !bsv\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			dwc3_otg_start_peripheral(mdwc, 0);
			/*
			 * Decrement pm usage count upon cable disconnect
			 * which was incremented upon cable connect in
			 * OTG_STATE_B_IDLE state
			 */
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "!BSV psync",
				atomic_read(&mdwc->dev->power.usage_count));
			work = 1;
		} else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
			test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BPER bsv && susp\n");
			mdwc->otg_state = OTG_STATE_B_SUSPEND;
			/*
			 * Decrement pm usage count upon bus suspend.
			 * Count was incremented either upon cable
			 * connect in OTG_STATE_B_IDLE or host
			 * initiated resume after bus suspend in
			 * OTG_STATE_B_SUSPEND state
			 */
			pm_runtime_mark_last_busy(mdwc->dev);
			pm_runtime_put_autosuspend(mdwc->dev);
			dbg_event(0xFF, "SUSP put",
				atomic_read(&mdwc->dev->power.usage_count));
		}
		break;

	case OTG_STATE_B_SUSPEND:
		if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
			/* cable removed while bus-suspended */
			dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			dwc3_otg_start_peripheral(mdwc, 0);
		} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BSUSP !susp\n");
			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
			/*
			 * Increment pm usage count upon host
			 * initiated resume. Count was decremented
			 * upon bus suspend in
			 * OTG_STATE_B_PERIPHERAL state.
			 */
			pm_runtime_get_sync(mdwc->dev);
			dbg_event(0xFF, "!SUSP gsync",
				atomic_read(&mdwc->dev->power.usage_count));
		}
		break;

	case OTG_STATE_A_IDLE:
		/* Switch to A-Device*/
		if (test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "id\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			mdwc->vbus_retry_count = 0;
			work = 1;
		} else {
			mdwc->otg_state = OTG_STATE_A_HOST;
			ret = dwc3_otg_start_host(mdwc, 1);
			if ((ret == -EPROBE_DEFER) &&
					mdwc->vbus_retry_count < 3) {
				/*
				 * Get regulator failed as regulator driver is
				 * not up yet. Will try to start host after 1sec
				 */
				mdwc->otg_state = OTG_STATE_A_IDLE;
				dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
				delay = VBUS_REG_CHECK_DELAY;
				work = 1;
				mdwc->vbus_retry_count++;
			} else if (ret) {
				/* hard failure: stay in A_IDLE, do not
				 * reschedule (skips the 'work' check below)
				 */
				dev_err(mdwc->dev, "unable to start host\n");
				mdwc->otg_state = OTG_STATE_A_IDLE;
				goto ret;
			}
		}
		break;

	case OTG_STATE_A_HOST:
		if (test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "id\n");
			dwc3_otg_start_host(mdwc, 0);
			mdwc->otg_state = OTG_STATE_B_IDLE;
			mdwc->vbus_retry_count = 0;
			work = 1;
		} else {
			dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
			dbg_event(0xFF, "XHCIResume", 0);
			/* NOTE(review): dwc was already NULL-checked at entry,
			 * so this guard is redundant — confirm and drop.
			 */
			if (dwc)
				pm_runtime_resume(&dwc->xhci->dev);
		}
		break;

	default:
		dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);

	}

	/* reschedule ourselves if a state transition requested it */
	if (work)
		schedule_delayed_work(&mdwc->sm_work, delay);

ret:
	return;
}
3815
3816#ifdef CONFIG_PM_SLEEP
3817static int dwc3_msm_pm_suspend(struct device *dev)
3818{
3819 int ret = 0;
3820 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3821 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3822
3823 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003824 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003825
3826 flush_workqueue(mdwc->dwc3_wq);
3827 if (!atomic_read(&dwc->in_lpm)) {
3828 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
3829 return -EBUSY;
3830 }
3831
3832 ret = dwc3_msm_suspend(mdwc);
3833 if (!ret)
3834 atomic_set(&mdwc->pm_suspended, 1);
3835
3836 return ret;
3837}
3838
/*
 * System-sleep resume handler: clears the pm_suspended flag and queues
 * resume_work so the OTG state machine re-evaluates cable state.
 */
static int dwc3_msm_pm_resume(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	/* NOTE(review): dwc looks unused in this body; presumably it is
	 * referenced by the dbg_event() macro — confirm before removing.
	 */
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "dwc3-msm PM resume\n");
	dbg_event(0xFF, "PM Res", 0);

	/* flush to avoid race in read/write of pm_suspended */
	flush_workqueue(mdwc->dwc3_wq);
	atomic_set(&mdwc->pm_suspended, 0);

	/* kick in otg state machine */
	queue_work(mdwc->dwc3_wq, &mdwc->resume_work);

	return 0;
}
3856#endif
3857
3858#ifdef CONFIG_PM
/*
 * Runtime-PM idle callback: only logs; returns 0 so the PM core may
 * proceed to runtime suspend.
 */
static int dwc3_msm_runtime_idle(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	/* NOTE(review): dwc looks unused in this body; presumably it is
	 * referenced by the dbg_event() macro — confirm before removing.
	 */
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "DWC3-msm runtime idle\n");
	dbg_event(0xFF, "RT Idle", 0);

	return 0;
}
3869
/*
 * Runtime-PM suspend callback: delegates the low-power transition to
 * dwc3_msm_suspend().
 */
static int dwc3_msm_runtime_suspend(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	/* NOTE(review): dwc looks unused in this body; presumably it is
	 * referenced by the dbg_event() macro — confirm before removing.
	 */
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "DWC3-msm runtime suspend\n");
	dbg_event(0xFF, "RT Sus", 0);

	return dwc3_msm_suspend(mdwc);
}
3880
/*
 * Runtime-PM resume callback: delegates the wake-up transition to
 * dwc3_msm_resume().
 */
static int dwc3_msm_runtime_resume(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	/* NOTE(review): dwc looks unused in this body; presumably it is
	 * referenced by the dbg_event() macro — confirm before removing.
	 */
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "DWC3-msm runtime resume\n");
	dbg_event(0xFF, "RT Res", 0);

	return dwc3_msm_resume(mdwc);
}
3891#endif
3892
/*
 * PM callback table. SET_SYSTEM_SLEEP_PM_OPS entries compile in only
 * under CONFIG_PM_SLEEP and SET_RUNTIME_PM_OPS only under CONFIG_PM;
 * the macros expand to nothing otherwise, matching the #ifdef guards
 * around the handlers above.
 */
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
				dwc3_msm_runtime_idle)
};
3898
3899static const struct of_device_id of_dwc3_matach[] = {
3900 {
3901 .compatible = "qcom,dwc-usb3-msm",
3902 },
3903 { },
3904};
3905MODULE_DEVICE_TABLE(of, of_dwc3_matach);
3906
3907static struct platform_driver dwc3_msm_driver = {
3908 .probe = dwc3_msm_probe,
3909 .remove = dwc3_msm_remove,
3910 .driver = {
3911 .name = "msm-dwc3",
3912 .pm = &dwc3_msm_dev_pm_ops,
3913 .of_match_table = of_dwc3_matach,
3914 },
3915};
3916
3917MODULE_LICENSE("GPL v2");
3918MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
3919
3920static int dwc3_msm_init(void)
3921{
3922 return platform_driver_register(&dwc3_msm_driver);
3923}
3924module_init(dwc3_msm_init);
3925
/* Module teardown: unregister the platform driver registered at init. */
static void __exit dwc3_msm_exit(void)
{
	platform_driver_unregister(&dwc3_msm_driver);
}
module_exit(dwc3_msm_exit);