/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
13
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_wakeup.h>
#include <linux/power_supply.h>
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/msm-bus.h>
#include <linux/irq.h>
#include <linux/extcon.h>
#include <linux/reset.h>
#include <linux/clk/qcom.h>

#include "power.h"
#include "core.h"
#include "gadget.h"
#include "dbm.h"
#include "debug.h"
#include "xhci.h"
55
/* Timeout to wait for USB cable status notification (in ms) */
57#define SM_INIT_TIMEOUT 30000
58
59/* AHB2PHY register offsets */
60#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
61
/* AHB2PHY read/write wait value */
63#define ONE_READ_WRITE_WAIT 0x11
64
/* CPU to which the USB interrupt is affined */
static int cpu_to_affin;
module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cpu_to_affin, "Affine USB IRQ to this CPU");
69
70/* XHCI registers */
71#define USB3_HCSPARAMS1 (0x4)
72#define USB3_PORTSC (0x420)
73
/* USB QSCRATCH hardware registers */
78#define QSCRATCH_REG_OFFSET (0x000F8800)
79#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
80#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
81#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
82#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
83
84#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
85#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
86#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
87#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
88#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
89
90/* QSCRATCH_GENERAL_CFG register bit offset */
91#define PIPE_UTMI_CLK_SEL BIT(0)
92#define PIPE3_PHYSTATUS_SW BIT(3)
93#define PIPE_UTMI_CLK_DIS BIT(8)
94
95#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
96#define UTMI_OTG_VBUS_VALID BIT(20)
97#define SW_SESSVLD_SEL BIT(28)
98
99#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
100#define LANE0_PWR_PRESENT BIT(24)
101
102/* GSI related registers */
103#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
104#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
105
106#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
107#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
108#define GSI_CLK_EN_MASK BIT(12)
109#define BLOCK_GSI_WR_GO_MASK BIT(1)
110#define GSI_EN_MASK BIT(0)
111
112#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
113#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
114#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
115#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
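/*
 * Note (added for clarity): n in GSI_DBL_ADDR_*() and GSI_RING_BASE_ADDR_*()
 * above is the zero-based GSI interrupter/doorbell index (ep_intr_num - 1,
 * as used by gsi_store_ringbase_dbl_info() below); consecutive indices are
 * 32-bit words apart, hence the n*4 offset.
 */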
116
117#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
118#define GSI_WR_CTRL_STATE_MASK BIT(15)
119
Mayank Ranaf4918d32016-12-15 13:35:55 -0800120#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
121#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
122#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
123#define DWC3_GEVENT_TYPE_GSI 0x3
124
Mayank Rana511f3b22016-08-02 12:00:11 -0700125struct dwc3_msm_req_complete {
126 struct list_head list_item;
127 struct usb_request *req;
128 void (*orig_complete)(struct usb_ep *ep,
129 struct usb_request *req);
130};
131
132enum dwc3_id_state {
133 DWC3_ID_GROUND = 0,
134 DWC3_ID_FLOAT,
135};
136
137/* for type c cable */
138enum plug_orientation {
139 ORIENTATION_NONE,
140 ORIENTATION_CC1,
141 ORIENTATION_CC2,
142};
143
144/* Input bits to state machine (mdwc->inputs) */
145
146#define ID 0
147#define B_SESS_VLD 1
148#define B_SUSPEND 2
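/*
 * These are bit numbers within mdwc->inputs (an unsigned long), intended to
 * be manipulated with set_bit()/clear_bit()/test_bit() by the state machine.
 */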
149
150struct dwc3_msm {
151 struct device *dev;
152 void __iomem *base;
153 void __iomem *ahb2phy_base;
154 struct platform_device *dwc3;
155 const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
156 struct list_head req_complete_list;
157 struct clk *xo_clk;
158 struct clk *core_clk;
159 long core_clk_rate;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800160 long core_clk_rate_hs;
Mayank Rana511f3b22016-08-02 12:00:11 -0700161 struct clk *iface_clk;
162 struct clk *sleep_clk;
163 struct clk *utmi_clk;
164 unsigned int utmi_clk_rate;
165 struct clk *utmi_clk_src;
166 struct clk *bus_aggr_clk;
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +0530167 struct clk *noc_aggr_clk;
Mayank Rana511f3b22016-08-02 12:00:11 -0700168 struct clk *cfg_ahb_clk;
Amit Nischal4d278212016-06-06 17:54:34 +0530169 struct reset_control *core_reset;
Mayank Rana511f3b22016-08-02 12:00:11 -0700170 struct regulator *dwc3_gdsc;
171
172 struct usb_phy *hs_phy, *ss_phy;
173
174 struct dbm *dbm;
175
176 /* VBUS regulator for host mode */
177 struct regulator *vbus_reg;
178 int vbus_retry_count;
179 bool resume_pending;
180 atomic_t pm_suspended;
181 int hs_phy_irq;
182 int ss_phy_irq;
183 struct work_struct resume_work;
184 struct work_struct restart_usb_work;
185 bool in_restart;
186 struct workqueue_struct *dwc3_wq;
187 struct delayed_work sm_work;
188 unsigned long inputs;
189 unsigned int max_power;
190 bool charging_disabled;
191 enum usb_otg_state otg_state;
Mayank Rana511f3b22016-08-02 12:00:11 -0700192 struct work_struct bus_vote_w;
193 unsigned int bus_vote;
194 u32 bus_perf_client;
195 struct msm_bus_scale_pdata *bus_scale_table;
196 struct power_supply *usb_psy;
Jack Pham4b8b4ae2016-08-09 11:36:34 -0700197 struct work_struct vbus_draw_work;
Mayank Rana511f3b22016-08-02 12:00:11 -0700198 bool in_host_mode;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800199 enum usb_device_speed max_rh_port_speed;
Mayank Rana511f3b22016-08-02 12:00:11 -0700200 unsigned int tx_fifo_size;
201 bool vbus_active;
202 bool suspend;
203 bool disable_host_mode_pm;
204 enum dwc3_id_state id_state;
205 unsigned long lpm_flags;
206#define MDWC3_SS_PHY_SUSPEND BIT(0)
207#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
208#define MDWC3_POWER_COLLAPSE BIT(2)
209
210 unsigned int irq_to_affin;
211 struct notifier_block dwc3_cpu_notifier;
212
213 struct extcon_dev *extcon_vbus;
214 struct extcon_dev *extcon_id;
215 struct notifier_block vbus_nb;
216 struct notifier_block id_nb;
217
Jack Pham4d4e9342016-12-07 19:25:02 -0800218 struct notifier_block host_nb;
219
Mayank Rana511f3b22016-08-02 12:00:11 -0700220 int pwr_event_irq;
221 atomic_t in_p3;
222 unsigned int lpm_to_suspend_delay;
223 bool init;
224 enum plug_orientation typec_orientation;
Mayank Ranaf4918d32016-12-15 13:35:55 -0800225 u32 num_gsi_event_buffers;
226 struct dwc3_event_buffer **gsi_ev_buff;
Mayank Rana511f3b22016-08-02 12:00:11 -0700227};
228
229#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
230#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
231#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
232
233#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
234#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
235#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
236
237#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
238#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
239#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
240
241#define DSTS_CONNECTSPD_SS 0x4
242
243
244static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
245static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800246static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Rana511f3b22016-08-02 12:00:11 -0700247/**
248 *
249 * Read register with debug info.
250 *
251 * @base - DWC3 base virtual address.
252 * @offset - register offset.
253 *
254 * @return u32
255 */
256static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
257{
258 u32 val = ioread32(base + offset);
259 return val;
260}
261
262/**
263 * Read register masked field with debug info.
264 *
265 * @base - DWC3 base virtual address.
266 * @offset - register offset.
267 * @mask - register bitmask.
268 *
269 * @return u32
270 */
271static inline u32 dwc3_msm_read_reg_field(void *base,
272 u32 offset,
273 const u32 mask)
274{
275 u32 shift = find_first_bit((void *)&mask, 32);
276 u32 val = ioread32(base + offset);
277
278 val &= mask; /* clear other bits */
279 val >>= shift;
280 return val;
281}
282
283/**
284 *
285 * Write register with debug info.
286 *
287 * @base - DWC3 base virtual address.
288 * @offset - register offset.
289 * @val - value to write.
290 *
291 */
292static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
293{
294 iowrite32(val, base + offset);
295}
296
297/**
298 * Write register masked field with debug info.
299 *
300 * @base - DWC3 base virtual address.
301 * @offset - register offset.
302 * @mask - register bitmask.
303 * @val - value to write.
304 *
305 */
306static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
307 const u32 mask, u32 val)
308{
309 u32 shift = find_first_bit((void *)&mask, 32);
310 u32 tmp = ioread32(base + offset);
311
312 tmp &= ~mask; /* clear written bits */
313 val = tmp | (val << shift);
314 iowrite32(val, base + offset);
315}
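
/*
 * Illustrative example (added, not from the original source): a masked field
 * write shifts the value up to the mask's lowest set bit, e.g.
 *
 *	dwc3_msm_write_reg_field(mdwc->base, GSI_GENERAL_CFG_REG,
 *				 GSI_CLK_EN_MASK, 1);
 *
 * reads the register, clears bit 12 (GSI_CLK_EN_MASK) and writes 1 << 12,
 * leaving all other bits untouched.
 */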
316
317/**
318 * Write register and read back masked value to confirm it is written
319 *
320 * @base - DWC3 base virtual address.
321 * @offset - register offset.
322 * @mask - register bitmask specifying what should be updated
323 * @val - value to write.
324 *
325 */
326static inline void dwc3_msm_write_readback(void *base, u32 offset,
327 const u32 mask, u32 val)
328{
329 u32 write_val, tmp = ioread32(base + offset);
330
331 tmp &= ~mask; /* retain other bits */
332 write_val = tmp | val;
333
334 iowrite32(write_val, base + offset);
335
336 /* Read back to see if val was written */
337 tmp = ioread32(base + offset);
338 tmp &= mask; /* clear other bits */
339
340 if (tmp != val)
341 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
342 __func__, val, offset);
343}
344
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800345static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
346{
347 int i, num_ports;
348 u32 reg;
349
350 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
351 num_ports = HCS_MAX_PORTS(reg);
352
353 for (i = 0; i < num_ports; i++) {
354 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
355 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
356 return true;
357 }
358
359 return false;
360}
361
Mayank Rana511f3b22016-08-02 12:00:11 -0700362static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
363{
364 int i, num_ports;
365 u32 reg;
366
367 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
368 num_ports = HCS_MAX_PORTS(reg);
369
370 for (i = 0; i < num_ports; i++) {
371 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
372 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
373 return true;
374 }
375
376 return false;
377}
378
379static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
380{
381 u8 speed;
382
383 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
384 return !!(speed & DSTS_CONNECTSPD_SS);
385}
386
387static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
388{
389 if (mdwc->in_host_mode)
390 return dwc3_msm_is_host_superspeed(mdwc);
391
392 return dwc3_msm_is_dev_superspeed(mdwc);
393}
394
395#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
396/**
397 * Configure the DBM with the BAM's data fifo.
398 * This function is called by the USB BAM Driver
399 * upon initialization.
400 *
401 * @ep - pointer to usb endpoint.
402 * @addr - address of data fifo.
403 * @size - size of data fifo.
404 *
405 */
406int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
407 u32 size, u8 dst_pipe_idx)
408{
409 struct dwc3_ep *dep = to_dwc3_ep(ep);
410 struct dwc3 *dwc = dep->dwc;
411 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
412
413 dev_dbg(mdwc->dev, "%s\n", __func__);
414
415 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
416 dst_pipe_idx);
417}
418
419
/**
 * Clean up MSM-specific endpoint state when a request completes.
 *
 * Also calls the original request completion callback.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to usb_request instance.
 */
430static void dwc3_msm_req_complete_func(struct usb_ep *ep,
431 struct usb_request *request)
432{
433 struct dwc3_ep *dep = to_dwc3_ep(ep);
434 struct dwc3 *dwc = dep->dwc;
435 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
436 struct dwc3_msm_req_complete *req_complete = NULL;
437
438 /* Find original request complete function and remove it from list */
439 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
440 if (req_complete->req == request)
441 break;
442 }
443 if (!req_complete || req_complete->req != request) {
444 dev_err(dep->dwc->dev, "%s: could not find the request\n",
445 __func__);
446 return;
447 }
448 list_del(&req_complete->list_item);
449
	/*
	 * Release one more TRB back to the pool: the DBM queue used two TRBs
	 * (normal + link), while dwc3_gadget_giveback() in dwc3/gadget.c
	 * released only one.
	 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700455 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700456
457 /* Unconfigure dbm ep */
458 dbm_ep_unconfig(mdwc->dbm, dep->number);
459
	/*
	 * If this is the last endpoint being unconfigured, also reset the
	 * event buffers; unless the ep is being unconfigured due to LPM,
	 * in which case the event buffer only gets reset during the
	 * block reset.
	 */
466 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
467 !dbm_reset_ep_after_lpm(mdwc->dbm))
468 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
469
470 /*
471 * Call original complete function, notice that dwc->lock is already
472 * taken by the caller of this function (dwc3_gadget_giveback()).
473 */
474 request->complete = req_complete->orig_complete;
475 if (request->complete)
476 request->complete(ep, request);
477
478 kfree(req_complete);
479}
480
481
/**
 * Helper function: reset a DBM endpoint.
 *
 * @mdwc - pointer to dwc3_msm instance.
 * @dep - pointer to dwc3_ep instance.
 *
 * @return int - 0 on success, negative on error.
 */
492static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
493{
494 int ret;
495
496 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
497
498 /* Reset the dbm endpoint */
499 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
500 if (ret) {
501 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
502 __func__);
503 return ret;
504 }
505
506 /*
507 * The necessary delay between asserting and deasserting the dbm ep
508 * reset is based on the number of active endpoints. If there is more
509 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
510 * delay will suffice.
511 */
512 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
513 usleep_range(1000, 1200);
514 else
515 udelay(10);
516 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
517 if (ret) {
518 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
519 __func__);
520 return ret;
521 }
522
523 return 0;
524}
525
526/**
527* Reset the DBM endpoint which is linked to the given USB endpoint.
528*
529* @usb_ep - pointer to usb_ep instance.
530*
531* @return int - 0 on success, negative on error.
532*/
533
534int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
535{
536 struct dwc3_ep *dep = to_dwc3_ep(ep);
537 struct dwc3 *dwc = dep->dwc;
538 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
539
540 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
541}
542EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
543
544
545/**
546* Helper function.
547* See the header of the dwc3_msm_ep_queue function.
548*
549* @dwc3_ep - pointer to dwc3_ep instance.
550* @req - pointer to dwc3_request instance.
551*
552* @return int - 0 on success, negative on error.
553*/
554static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
555{
556 struct dwc3_trb *trb;
557 struct dwc3_trb *trb_link;
558 struct dwc3_gadget_ep_cmd_params params;
559 u32 cmd;
560 int ret = 0;
561
	/*
	 * We push the request to dep->started_list to indicate that this
	 * request has been issued with StartTransfer. The request leaves
	 * this list in two cases: either the transfer completes (which never
	 * happens for an endless transfer using a circular TRB ring with a
	 * link TRB), or a StopTransfer is issued, which the function driver
	 * can initiate by calling dequeue.
	 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700569 req->started = true;
570 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700571
572 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana83ad5822016-08-09 14:17:22 -0700573 trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
574 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700575 memset(trb, 0, sizeof(*trb));
576
577 req->trb = trb;
578 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
579 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
580 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
581 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
582 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
583
584 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana83ad5822016-08-09 14:17:22 -0700585 trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
586 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700587 memset(trb_link, 0, sizeof(*trb_link));
588
589 trb_link->bpl = lower_32_bits(req->trb_dma);
590 trb_link->bph = DBM_TRB_BIT |
591 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
592 trb_link->size = 0;
593 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
594
595 /*
596 * Now start the transfer
597 */
598 memset(&params, 0, sizeof(params));
599 params.param0 = 0; /* TDAddr High */
600 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
601
602 /* DBM requires IOC to be set */
603 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700604 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700605 if (ret < 0) {
606 dev_dbg(dep->dwc->dev,
607 "%s: failed to send STARTTRANSFER command\n",
608 __func__);
609
610 list_del(&req->list);
611 return ret;
612 }
613 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700614 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700615
616 return ret;
617}
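
/*
 * Summary (added for clarity): the ring built above is just two TRBs, a
 * NORMAL TRB (HWO|CHN) whose buffer pointer is left as a fake, DBM-managed
 * address, and a LINK TRB whose buffer pointer loops back to that first TRB.
 * Because the ring is circular, the transfer started here never completes on
 * its own; it runs until a StopTransfer is issued.
 */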
618
/**
 * Queue a USB request to the DBM endpoint.
 * This function should be called after the endpoint
 * has been enabled by ep_enable.
 *
 * It prepares the special TRB structure that the DBM HW
 * expects, so that the endpoint can be used in DBM mode.
 *
 * The TRBs prepared by this function are one normal TRB
 * that points to a fake buffer, followed by a link TRB
 * that points back to the first TRB.
 *
 * The API of this function follows the regular API of
 * usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to usb_request instance.
 * @gfp_flags - possible flags.
 *
 * @return int - 0 on success, negative on error.
 */
641static int dwc3_msm_ep_queue(struct usb_ep *ep,
642 struct usb_request *request, gfp_t gfp_flags)
643{
644 struct dwc3_request *req = to_dwc3_request(request);
645 struct dwc3_ep *dep = to_dwc3_ep(ep);
646 struct dwc3 *dwc = dep->dwc;
647 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
648 struct dwc3_msm_req_complete *req_complete;
649 unsigned long flags;
650 int ret = 0, size;
651 u8 bam_pipe;
652 bool producer;
653 bool disable_wb;
654 bool internal_mem;
655 bool ioc;
656 bool superspeed;
657
658 if (!(request->udc_priv & MSM_SPS_MODE)) {
659 /* Not SPS mode, call original queue */
660 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
661 __func__);
662
663 return (mdwc->original_ep_ops[dep->number])->queue(ep,
664 request,
665 gfp_flags);
666 }
667
668 /* HW restriction regarding TRB size (8KB) */
669 if (req->request.length < 0x2000) {
670 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
671 return -EINVAL;
672 }
673
	/*
	 * Override the req->complete function, but before doing that,
	 * store its original pointer in the req_complete_list.
	 */
678 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
679 if (!req_complete)
680 return -ENOMEM;
681
682 req_complete->req = request;
683 req_complete->orig_complete = request->complete;
684 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
685 request->complete = dwc3_msm_req_complete_func;
686
687 /*
688 * Configure the DBM endpoint
689 */
690 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
691 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
692 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
693 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
694 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
695
696 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
697 disable_wb, internal_mem, ioc);
698 if (ret < 0) {
699 dev_err(mdwc->dev,
700 "error %d after calling dbm_ep_config\n", ret);
701 return ret;
702 }
703
	dev_vdbg(dwc->dev, "%s: queuing request %p to ep %s length %d\n",
			__func__, request, ep->name, request->length);
706 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
707 dbm_event_buffer_config(mdwc->dbm,
708 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
709 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
710 DWC3_GEVNTSIZ_SIZE(size));
711
	/*
	 * Take the dwc3 core lock (with interrupts disabled) so that we are
	 * the only ones configuring the HW at this point, and queue the
	 * request as quickly as possible so the lock can be released again.
	 */
719 spin_lock_irqsave(&dwc->lock, flags);
720 if (!dep->endpoint.desc) {
721 dev_err(mdwc->dev,
722 "%s: trying to queue request %p to disabled ep %s\n",
723 __func__, request, ep->name);
724 ret = -EPERM;
725 goto err;
726 }
727
728 if (dep->number == 0 || dep->number == 1) {
729 dev_err(mdwc->dev,
730 "%s: trying to queue dbm request %p to control ep %s\n",
731 __func__, request, ep->name);
732 ret = -EPERM;
733 goto err;
734 }
735
736
Mayank Rana83ad5822016-08-09 14:17:22 -0700737 if (dep->trb_dequeue != dep->trb_enqueue ||
738 !list_empty(&dep->pending_list)
739 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700740 dev_err(mdwc->dev,
			"%s: trying to queue dbm request %p to ep %s\n",
742 __func__, request, ep->name);
743 ret = -EPERM;
744 goto err;
745 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700746 dep->trb_dequeue = 0;
747 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700748 }
749
750 ret = __dwc3_msm_ep_queue(dep, req);
751 if (ret < 0) {
752 dev_err(mdwc->dev,
753 "error %d after calling __dwc3_msm_ep_queue\n", ret);
754 goto err;
755 }
756
757 spin_unlock_irqrestore(&dwc->lock, flags);
758 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
759 dbm_set_speed(mdwc->dbm, (u8)superspeed);
760
761 return 0;
762
763err:
764 spin_unlock_irqrestore(&dwc->lock, flags);
765 kfree(req_complete);
766 return ret;
767}
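
/*
 * Illustrative (hypothetical) caller sketch, not taken from this file: a
 * function driver using a DBM endpoint would mark the request for SPS mode
 * via udc_priv before queuing, roughly:
 *
 *	req->udc_priv = MSM_SPS_MODE | MSM_PRODUCER |
 *			(bam_pipe & MSM_PIPE_ID_MASK);
 *	req->length = 0x2000;		// at least 8KB, per the check above
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * The exact flag combination depends on the BAM/SPS client and is only an
 * assumption here.
 */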
768
769/*
770* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
771*
772* @usb_ep - pointer to usb_ep instance.
773*
774* @return int - XferRscIndex
775*/
776static inline int gsi_get_xfer_index(struct usb_ep *ep)
777{
778 struct dwc3_ep *dep = to_dwc3_ep(ep);
779
780 return dep->resource_index;
781}
782
783/*
784* Fills up the GSI channel information needed in call to IPA driver
785* for GSI channel creation.
786*
787* @usb_ep - pointer to usb_ep instance.
788* @ch_info - output parameter with requested channel info
789*/
790static void gsi_get_channel_info(struct usb_ep *ep,
791 struct gsi_channel_info *ch_info)
792{
793 struct dwc3_ep *dep = to_dwc3_ep(ep);
794 int last_trb_index = 0;
795 struct dwc3 *dwc = dep->dwc;
796 struct usb_gsi_request *request = ch_info->ch_req;
797
798 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
799 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Rana83ad5822016-08-09 14:17:22 -0700800 DWC3_DEPCMD);
Mayank Rana511f3b22016-08-02 12:00:11 -0700801 ch_info->depcmd_hi_addr = 0;
802
803 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
804 &dep->trb_pool[0]);
	/* Convert to multiples of 1KB */
806 ch_info->const_buffer_size = request->buf_len/1024;
807
808 /* IN direction */
809 if (dep->direction) {
810 /*
811 * Multiply by size of each TRB for xfer_ring_len in bytes.
812 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
813 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
814 */
815 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
816 last_trb_index = 2 * request->num_bufs + 2;
817 } else { /* OUT direction */
818 /*
819 * Multiply by size of each TRB for xfer_ring_len in bytes.
820 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
821 * LINK TRB.
822 */
823 ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
824 last_trb_index = request->num_bufs + 1;
825 }
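
	/*
	 * Worked example (added for clarity): with num_bufs = 8 and 16-byte
	 * TRBs, an IN channel gets (2*8 + 2) * 0x10 = 288 bytes of ring
	 * (last_trb_index = 18), while an OUT channel gets (8 + 1) * 0x10 =
	 * 144 bytes (last_trb_index = 9).
	 */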
826
827 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
828 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
829 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
830 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
831 DWC3_GEVNTCOUNT(ep->ep_intr_num));
832 ch_info->gevntcount_hi_addr = 0;
833
834 dev_dbg(dwc->dev,
835 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
836 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
837 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
838}
839
840/*
841* Perform StartXfer on GSI EP. Stores XferRscIndex.
842*
843* @usb_ep - pointer to usb_ep instance.
844*
845* @return int - 0 on success
846*/
847static int gsi_startxfer_for_ep(struct usb_ep *ep)
848{
849 int ret;
850 struct dwc3_gadget_ep_cmd_params params;
851 u32 cmd;
852 struct dwc3_ep *dep = to_dwc3_ep(ep);
853 struct dwc3 *dwc = dep->dwc;
854
855 memset(&params, 0, sizeof(params));
856 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
857 params.param0 |= (ep->ep_intr_num << 16);
858 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
859 &dep->trb_pool[0]));
860 cmd = DWC3_DEPCMD_STARTTRANSFER;
861 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700862 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700863
864 if (ret < 0)
865 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700866 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700867 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
868 return ret;
869}
870
871/*
872* Store Ring Base and Doorbell Address for GSI EP
873* for GSI channel creation.
874*
875* @usb_ep - pointer to usb_ep instance.
876* @dbl_addr - Doorbell address obtained from IPA driver
877*/
878static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
879{
880 struct dwc3_ep *dep = to_dwc3_ep(ep);
881 struct dwc3 *dwc = dep->dwc;
882 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
883 int n = ep->ep_intr_num - 1;
884
885 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
886 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
887 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
888
889 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
890 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
891 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
892 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
893}
894
895/*
896* Rings Doorbell for IN GSI Channel
897*
898* @usb_ep - pointer to usb_ep instance.
899* @request - pointer to GSI request. This is used to pass in the
900* address of the GSI doorbell obtained from IPA driver
901*/
902static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
903{
904 void __iomem *gsi_dbl_address_lsb;
905 void __iomem *gsi_dbl_address_msb;
906 dma_addr_t offset;
907 u64 dbl_addr = *((u64 *)request->buf_base_addr);
908 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
909 u32 dbl_hi_addr = (dbl_addr >> 32);
910 u32 num_trbs = (request->num_bufs * 2 + 2);
911 struct dwc3_ep *dep = to_dwc3_ep(ep);
912 struct dwc3 *dwc = dep->dwc;
913 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
914
915 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
916 dbl_lo_addr, sizeof(u32));
917 if (!gsi_dbl_address_lsb)
918 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
919
920 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
921 dbl_hi_addr, sizeof(u32));
922 if (!gsi_dbl_address_msb)
923 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
924
925 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
926 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
927 &offset, gsi_dbl_address_lsb, dbl_lo_addr);
928
929 writel_relaxed(offset, gsi_dbl_address_lsb);
930 writel_relaxed(0, gsi_dbl_address_msb);
931}
932
933/*
934* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
935*
936* @usb_ep - pointer to usb_ep instance.
937* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
938*
939* @return int - 0 on success
940*/
941static int gsi_updatexfer_for_ep(struct usb_ep *ep,
942 struct usb_gsi_request *request)
943{
944 int i;
945 int ret;
946 u32 cmd;
947 int num_trbs = request->num_bufs + 1;
948 struct dwc3_trb *trb;
949 struct dwc3_gadget_ep_cmd_params params;
950 struct dwc3_ep *dep = to_dwc3_ep(ep);
951 struct dwc3 *dwc = dep->dwc;
952
953 for (i = 0; i < num_trbs - 1; i++) {
954 trb = &dep->trb_pool[i];
955 trb->ctrl |= DWC3_TRB_CTRL_HWO;
956 }
957
958 memset(&params, 0, sizeof(params));
959 cmd = DWC3_DEPCMD_UPDATETRANSFER;
960 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -0700961 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700962 dep->flags |= DWC3_EP_BUSY;
963 if (ret < 0)
964 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
965 return ret;
966}
967
968/*
969* Perform EndXfer on particular GSI EP.
970*
971* @usb_ep - pointer to usb_ep instance.
972*/
973static void gsi_endxfer_for_ep(struct usb_ep *ep)
974{
975 struct dwc3_ep *dep = to_dwc3_ep(ep);
976 struct dwc3 *dwc = dep->dwc;
977
978 dwc3_stop_active_transfer(dwc, dep->number, true);
979}
980
981/*
982* Allocates and configures TRBs for GSI EPs.
983*
984* @usb_ep - pointer to usb_ep instance.
985* @request - pointer to GSI request.
986*
987* @return int - 0 on success
988*/
989static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
990{
991 int i = 0;
992 dma_addr_t buffer_addr = req->dma;
993 struct dwc3_ep *dep = to_dwc3_ep(ep);
994 struct dwc3 *dwc = dep->dwc;
995 struct dwc3_trb *trb;
996 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
997 : (req->num_bufs + 1);
998
999 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
1000 num_trbs * sizeof(struct dwc3_trb),
1001 num_trbs * sizeof(struct dwc3_trb), 0);
1002 if (!dep->trb_dma_pool) {
1003 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1004 dep->name);
1005 return -ENOMEM;
1006 }
1007
1008 dep->num_trbs = num_trbs;
1009
1010 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
1011 GFP_KERNEL, &dep->trb_pool_dma);
1012 if (!dep->trb_pool) {
1013 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
1014 dep->name);
1015 return -ENOMEM;
1016 }
1017
1018 /* IN direction */
1019 if (dep->direction) {
1020 for (i = 0; i < num_trbs ; i++) {
1021 trb = &dep->trb_pool[i];
1022 memset(trb, 0, sizeof(*trb));
1023 /* Set up first n+1 TRBs for ZLPs */
1024 if (i < (req->num_bufs + 1)) {
1025 trb->bpl = 0;
1026 trb->bph = 0;
1027 trb->size = 0;
1028 trb->ctrl = DWC3_TRBCTL_NORMAL
1029 | DWC3_TRB_CTRL_IOC;
1030 continue;
1031 }
1032
1033 /* Setup n TRBs pointing to valid buffers */
1034 trb->bpl = lower_32_bits(buffer_addr);
1035 trb->bph = 0;
1036 trb->size = 0;
1037 trb->ctrl = DWC3_TRBCTL_NORMAL
1038 | DWC3_TRB_CTRL_IOC;
1039 buffer_addr += req->buf_len;
1040
1041 /* Set up the Link TRB at the end */
1042 if (i == (num_trbs - 1)) {
1043 trb->bpl = dwc3_trb_dma_offset(dep,
1044 &dep->trb_pool[0]);
1045 trb->bph = (1 << 23) | (1 << 21)
1046 | (ep->ep_intr_num << 16);
1047 trb->size = 0;
1048 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1049 | DWC3_TRB_CTRL_HWO;
1050 }
1051 }
1052 } else { /* OUT direction */
1053
1054 for (i = 0; i < num_trbs ; i++) {
1055
1056 trb = &dep->trb_pool[i];
1057 memset(trb, 0, sizeof(*trb));
1058 trb->bpl = lower_32_bits(buffer_addr);
1059 trb->bph = 0;
1060 trb->size = req->buf_len;
1061 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
1062 | DWC3_TRB_CTRL_CSP
1063 | DWC3_TRB_CTRL_ISP_IMI;
1064 buffer_addr += req->buf_len;
1065
1066 /* Set up the Link TRB at the end */
1067 if (i == (num_trbs - 1)) {
1068 trb->bpl = dwc3_trb_dma_offset(dep,
1069 &dep->trb_pool[0]);
1070 trb->bph = (1 << 23) | (1 << 21)
1071 | (ep->ep_intr_num << 16);
1072 trb->size = 0;
1073 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1074 | DWC3_TRB_CTRL_HWO;
1075 }
1076 }
1077 }
1078 return 0;
1079}
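
/*
 * Layout recap (added for clarity), for example with req->num_bufs = 4:
 * an IN endpoint gets 10 TRBs, indices 0..4 being ZLP TRBs, 5..8 pointing at
 * the four data buffers, and index 9 being the LINK TRB back to TRB 0; an
 * OUT endpoint gets 5 TRBs, indices 0..3 pointing at the buffers and index 4
 * being the LINK TRB.
 */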
1080
1081/*
1082* Frees TRBs for GSI EPs.
1083*
1084* @usb_ep - pointer to usb_ep instance.
1085*
1086*/
1087static void gsi_free_trbs(struct usb_ep *ep)
1088{
1089 struct dwc3_ep *dep = to_dwc3_ep(ep);
1090
1091 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1092 return;
1093
1094 /* Free TRBs and TRB pool for EP */
1095 if (dep->trb_dma_pool) {
1096 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1097 dep->trb_pool_dma);
1098 dma_pool_destroy(dep->trb_dma_pool);
1099 dep->trb_pool = NULL;
1100 dep->trb_pool_dma = 0;
1101 dep->trb_dma_pool = NULL;
1102 }
1103}
1104/*
1105* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1106*
1107* @usb_ep - pointer to usb_ep instance.
1108* @request - pointer to GSI request.
1109*/
1110static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1111{
1112 struct dwc3_ep *dep = to_dwc3_ep(ep);
1113 struct dwc3 *dwc = dep->dwc;
1114 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1115 struct dwc3_gadget_ep_cmd_params params;
1116 const struct usb_endpoint_descriptor *desc = ep->desc;
1117 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
1118 u32 reg;
1119
1120 memset(&params, 0x00, sizeof(params));
1121
1122 /* Configure GSI EP */
1123 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1124 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1125
1126 /* Burst size is only needed in SuperSpeed mode */
1127 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1128 u32 burst = dep->endpoint.maxburst - 1;
1129
1130 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1131 }
1132
1133 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1134 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1135 | DWC3_DEPCFG_STREAM_EVENT_EN;
1136 dep->stream_capable = true;
1137 }
1138
1139 /* Set EP number */
1140 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1141
1142 /* Set interrupter number for GSI endpoints */
1143 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1144
1145 /* Enable XferInProgress and XferComplete Interrupts */
1146 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1147 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1148 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1149 /*
1150 * We must use the lower 16 TX FIFOs even though
1151 * HW might have more
1152 */
1153 /* Remove FIFO Number for GSI EP*/
1154 if (dep->direction)
1155 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1156
1157 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1158
1159 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1160 params.param0, params.param1, params.param2, dep->name);
1161
Mayank Rana83ad5822016-08-09 14:17:22 -07001162 dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001163
1164 /* Set XferRsc Index for GSI EP */
1165 if (!(dep->flags & DWC3_EP_ENABLED)) {
1166 memset(&params, 0x00, sizeof(params));
1167 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
Mayank Rana83ad5822016-08-09 14:17:22 -07001168 dwc3_send_gadget_ep_cmd(dep,
Mayank Rana511f3b22016-08-02 12:00:11 -07001169 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1170
1171 dep->endpoint.desc = desc;
1172 dep->comp_desc = comp_desc;
1173 dep->type = usb_endpoint_type(desc);
1174 dep->flags |= DWC3_EP_ENABLED;
1175 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1176 reg |= DWC3_DALEPENA_EP(dep->number);
1177 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1178 }
1179
1180}
1181
1182/*
1183* Enables USB wrapper for GSI
1184*
1185* @usb_ep - pointer to usb_ep instance.
1186*/
1187static void gsi_enable(struct usb_ep *ep)
1188{
1189 struct dwc3_ep *dep = to_dwc3_ep(ep);
1190 struct dwc3 *dwc = dep->dwc;
1191 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1192
1193 dwc3_msm_write_reg_field(mdwc->base,
1194 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1195 dwc3_msm_write_reg_field(mdwc->base,
1196 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1197 dwc3_msm_write_reg_field(mdwc->base,
1198 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1199 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1200 dwc3_msm_write_reg_field(mdwc->base,
1201 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1202}
1203
/*
 * Block or allow the doorbell towards GSI.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @block_db - true to block (set) the doorbell bit, false to clear it.
 */
1211static void gsi_set_clear_dbell(struct usb_ep *ep,
1212 bool block_db)
1213{
1214
1215 struct dwc3_ep *dep = to_dwc3_ep(ep);
1216 struct dwc3 *dwc = dep->dwc;
1217 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1218
1219 dwc3_msm_write_reg_field(mdwc->base,
1220 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1221}
1222
1223/*
1224* Performs necessary checks before stopping GSI channels
1225*
1226* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1227*/
1228static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1229{
1230 u32 timeout = 1500;
1231 u32 reg = 0;
1232 struct dwc3_ep *dep = to_dwc3_ep(ep);
1233 struct dwc3 *dwc = dep->dwc;
1234 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1235
1236 while (dwc3_msm_read_reg_field(mdwc->base,
1237 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1238 if (!timeout--) {
1239 dev_err(mdwc->dev,
1240 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1241 return false;
1242 }
1243 }
1244 /* Check for U3 only if we are not handling Function Suspend */
1245 if (!f_suspend) {
1246 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1247 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1248 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1249 return false;
1250 }
1251 }
1252
1253 return true;
1254}
1255
1256
1257/**
1258* Performs GSI operations or GSI EP related operations.
1259*
1260* @usb_ep - pointer to usb_ep instance.
1261* @op_data - pointer to opcode related data.
1262* @op - GSI related or GSI EP related op code.
1263*
1264* @return int - 0 on success, negative on error.
1265* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1266*/
1267static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1268 void *op_data, enum gsi_ep_op op)
1269{
1270 u32 ret = 0;
1271 struct dwc3_ep *dep = to_dwc3_ep(ep);
1272 struct dwc3 *dwc = dep->dwc;
1273 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1274 struct usb_gsi_request *request;
1275 struct gsi_channel_info *ch_info;
1276 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001277 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001278
1279 switch (op) {
1280 case GSI_EP_OP_PREPARE_TRBS:
1281 request = (struct usb_gsi_request *)op_data;
1282 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1283 ret = gsi_prepare_trbs(ep, request);
1284 break;
1285 case GSI_EP_OP_FREE_TRBS:
1286 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1287 gsi_free_trbs(ep);
1288 break;
1289 case GSI_EP_OP_CONFIG:
1290 request = (struct usb_gsi_request *)op_data;
1291 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001292 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001293 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001294 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001295 break;
1296 case GSI_EP_OP_STARTXFER:
1297 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001298 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001299 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001300 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001301 break;
1302 case GSI_EP_OP_GET_XFER_IDX:
1303 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1304 ret = gsi_get_xfer_index(ep);
1305 break;
1306 case GSI_EP_OP_STORE_DBL_INFO:
1307 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1308 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1309 break;
1310 case GSI_EP_OP_ENABLE_GSI:
1311 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1312 gsi_enable(ep);
1313 break;
1314 case GSI_EP_OP_GET_CH_INFO:
1315 ch_info = (struct gsi_channel_info *)op_data;
1316 gsi_get_channel_info(ep, ch_info);
1317 break;
1318 case GSI_EP_OP_RING_IN_DB:
1319 request = (struct usb_gsi_request *)op_data;
1320 dev_dbg(mdwc->dev, "RING IN EP DB\n");
1321 gsi_ring_in_db(ep, request);
1322 break;
1323 case GSI_EP_OP_UPDATEXFER:
1324 request = (struct usb_gsi_request *)op_data;
1325 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001326 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001327 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001328 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001329 break;
1330 case GSI_EP_OP_ENDXFER:
1331 request = (struct usb_gsi_request *)op_data;
1332 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001333 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001334 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001335 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001336 break;
1337 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1338 block_db = *((bool *)op_data);
1339 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1340 block_db);
1341 gsi_set_clear_dbell(ep, block_db);
1342 break;
1343 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1344 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1345 f_suspend = *((bool *)op_data);
1346 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1347 break;
1348 case GSI_EP_OP_DISABLE:
1349 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1350 ret = ep->ops->disable(ep);
1351 break;
1352 default:
1353 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1354 }
1355
1356 return ret;
1357}
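
/*
 * Note (added, based only on the ops handled above): a GSI client is
 * presumably expected to call these in roughly this order: PREPARE_TRBS and
 * CONFIG first, then STARTXFER, STORE_DBL_INFO and ENABLE_GSI, with
 * RING_IN_DB to kick the IN channel; teardown would use ENDXFER, DISABLE and
 * FREE_TRBS. The exact sequence is owned by the IPA/GSI function driver and
 * is only sketched here as an assumption.
 */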
1358
/**
 * Configure an MSM endpoint.
 * This function applies endpoint configurations that require an
 * MSM-specific implementation.
 *
 * It should be called by a USB function/class driver that needs
 * support from the MSM-specific hardware wrapping the USB3 core
 * (such as GSI- or DBM-specific endpoints).
 *
 * @ep - a pointer to a usb_ep instance
 *
 * @return int - 0 on success, negative on error.
 */
1373int msm_ep_config(struct usb_ep *ep)
1374{
1375 struct dwc3_ep *dep = to_dwc3_ep(ep);
1376 struct dwc3 *dwc = dep->dwc;
1377 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1378 struct usb_ep_ops *new_ep_ops;
1379
1380
1381 /* Save original ep ops for future restore*/
1382 if (mdwc->original_ep_ops[dep->number]) {
1383 dev_err(mdwc->dev,
1384 "ep [%s,%d] already configured as msm endpoint\n",
1385 ep->name, dep->number);
1386 return -EPERM;
1387 }
1388 mdwc->original_ep_ops[dep->number] = ep->ops;
1389
1390 /* Set new usb ops as we like */
1391 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1392 if (!new_ep_ops)
1393 return -ENOMEM;
1394
1395 (*new_ep_ops) = (*ep->ops);
1396 new_ep_ops->queue = dwc3_msm_ep_queue;
1397 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1398 ep->ops = new_ep_ops;
1399
	/*
	 * Additional MSM-specific endpoint configuration, if any,
	 * goes here.
	 */
1404
1405 return 0;
1406}
1407EXPORT_SYMBOL(msm_ep_config);
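
/*
 * Illustrative (hypothetical) usage, not taken from this file: a function
 * driver that wants DBM/GSI behaviour on an endpoint would typically pair
 * the two calls, roughly:
 *
 *	ret = msm_ep_config(ep);
 *	...
 *	msm_ep_unconfig(ep);
 *
 * so that the original ep ops are restored on teardown.
 */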
1408
/**
 * Un-configure an MSM endpoint.
 * Tear down the configuration done in the
 * msm_ep_config function.
1413 *
1414 * @ep - a pointer to some usb_ep instance
1415 *
1416 * @return int - 0 on success, negative on error.
1417 */
1418int msm_ep_unconfig(struct usb_ep *ep)
1419{
1420 struct dwc3_ep *dep = to_dwc3_ep(ep);
1421 struct dwc3 *dwc = dep->dwc;
1422 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1423 struct usb_ep_ops *old_ep_ops;
1424
1425 /* Restore original ep ops */
1426 if (!mdwc->original_ep_ops[dep->number]) {
1427 dev_err(mdwc->dev,
1428 "ep [%s,%d] was not configured as msm endpoint\n",
1429 ep->name, dep->number);
1430 return -EINVAL;
1431 }
1432 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1433 ep->ops = mdwc->original_ep_ops[dep->number];
1434 mdwc->original_ep_ops[dep->number] = NULL;
1435 kfree(old_ep_ops);
1436
	/*
	 * Additional MSM-specific endpoint un-configuration, if any,
	 * goes here.
	 */
1441
1442 return 0;
1443}
1444EXPORT_SYMBOL(msm_ep_unconfig);
1445#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1446
1447static void dwc3_resume_work(struct work_struct *w);
1448
1449static void dwc3_restart_usb_work(struct work_struct *w)
1450{
1451 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1452 restart_usb_work);
1453 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1454 unsigned int timeout = 50;
1455
1456 dev_dbg(mdwc->dev, "%s\n", __func__);
1457
1458 if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
1459 dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
1460 return;
1461 }
1462
1463 /* guard against concurrent VBUS handling */
1464 mdwc->in_restart = true;
1465
1466 if (!mdwc->vbus_active) {
1467 dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
1468 dwc->err_evt_seen = false;
1469 mdwc->in_restart = false;
1470 return;
1471 }
1472
Mayank Rana08e41922017-03-02 15:25:48 -08001473 dbg_event(0xFF, "RestartUSB", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07001474 /* Reset active USB connection */
1475 dwc3_resume_work(&mdwc->resume_work);
1476
1477 /* Make sure disconnect is processed before sending connect */
1478 while (--timeout && !pm_runtime_suspended(mdwc->dev))
1479 msleep(20);
1480
1481 if (!timeout) {
1482 dev_dbg(mdwc->dev,
1483 "Not in LPM after disconnect, forcing suspend...\n");
Mayank Rana08e41922017-03-02 15:25:48 -08001484 dbg_event(0xFF, "ReStart:RT SUSP",
1485 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07001486 pm_runtime_suspend(mdwc->dev);
1487 }
1488
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301489 mdwc->in_restart = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07001490 /* Force reconnect only if cable is still connected */
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301491 if (mdwc->vbus_active)
Mayank Rana511f3b22016-08-02 12:00:11 -07001492 dwc3_resume_work(&mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001493
1494 dwc->err_evt_seen = false;
1495 flush_delayed_work(&mdwc->sm_work);
1496}
1497
1498/*
1499 * Check whether the DWC3 requires resetting the ep
1500 * after going to Low Power Mode (lpm)
1501 */
1502bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1503{
1504 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1505 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1506
1507 return dbm_reset_ep_after_lpm(mdwc->dbm);
1508}
1509EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1510
1511/*
1512 * Config Global Distributed Switch Controller (GDSC)
1513 * to support controller power collapse
1514 */
1515static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1516{
1517 int ret;
1518
1519 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1520 return -EPERM;
1521
1522 if (on) {
1523 ret = regulator_enable(mdwc->dwc3_gdsc);
1524 if (ret) {
1525 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1526 return ret;
1527 }
1528 } else {
1529 ret = regulator_disable(mdwc->dwc3_gdsc);
1530 if (ret) {
1531 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1532 return ret;
1533 }
1534 }
1535
1536 return ret;
1537}
1538
1539static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
1540{
1541 int ret = 0;
1542
1543 if (assert) {
1544 disable_irq(mdwc->pwr_event_irq);
1545 /* Using asynchronous block reset to the hardware */
1546 dev_dbg(mdwc->dev, "block_reset ASSERT\n");
1547 clk_disable_unprepare(mdwc->utmi_clk);
1548 clk_disable_unprepare(mdwc->sleep_clk);
1549 clk_disable_unprepare(mdwc->core_clk);
1550 clk_disable_unprepare(mdwc->iface_clk);
Amit Nischal4d278212016-06-06 17:54:34 +05301551 ret = reset_control_assert(mdwc->core_reset);
Mayank Rana511f3b22016-08-02 12:00:11 -07001552 if (ret)
Amit Nischal4d278212016-06-06 17:54:34 +05301553 dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001554 } else {
1555 dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
Amit Nischal4d278212016-06-06 17:54:34 +05301556 ret = reset_control_deassert(mdwc->core_reset);
1557 if (ret)
1558 dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001559 ndelay(200);
1560 clk_prepare_enable(mdwc->iface_clk);
1561 clk_prepare_enable(mdwc->core_clk);
1562 clk_prepare_enable(mdwc->sleep_clk);
1563 clk_prepare_enable(mdwc->utmi_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07001564 enable_irq(mdwc->pwr_event_irq);
1565 }
1566
1567 return ret;
1568}
1569
1570static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
1571{
1572 u32 guctl, gfladj = 0;
1573
1574 guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
1575 guctl &= ~DWC3_GUCTL_REFCLKPER;
1576
1577 /* GFLADJ register is used starting with revision 2.50a */
1578 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
1579 gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
1580 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1581 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
1582 gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
1583 gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
1584 }
1585
1586 /* Refer to SNPS Databook Table 6-55 for calculations used */
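	/*
	 * Added note: GUCTL.REFCLKPER holds the reference-clock period in ns,
	 * which is where 52 (about 1/19.2 MHz) and 41 (about 1/24 MHz) below
	 * come from; the GFLADJ values are the matching 240 MHz decrement and
	 * fladj adjustments from the same databook table. This is stated as
	 * an interpretation, not verified against the databook.
	 */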
1587 switch (mdwc->utmi_clk_rate) {
1588 case 19200000:
1589 guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
1590 gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1591 gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1592 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1593 gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1594 break;
1595 case 24000000:
1596 guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
1597 gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1598 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1599 gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1600 break;
1601 default:
1602 dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
1603 mdwc->utmi_clk_rate);
1604 break;
1605 }
1606
1607 dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
1608 if (gfladj)
1609 dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
1610}
1611
1612/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
1613static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
1614{
1615 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
1616 /* On older cores set XHCI_REV bit to specify revision 1.0 */
1617 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1618 BIT(2), 1);
1619
1620 /*
1621 * Enable master clock for RAMs to allow BAM to access RAMs when
1622 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
1623 * are seen where RAM clocks get turned OFF in SS mode
1624 */
1625 dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
1626 dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
1627
1628}
1629
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001630static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1631{
1632 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1633 vbus_draw_work);
1634 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1635
1636 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1637}
1638
Mayank Rana511f3b22016-08-02 12:00:11 -07001639static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1640{
1641 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001642 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001643 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001644 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001645
1646 switch (event) {
1647 case DWC3_CONTROLLER_ERROR_EVENT:
1648 dev_info(mdwc->dev,
1649 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1650 dwc->irq_cnt);
1651
1652 dwc3_gadget_disable_irq(dwc);
1653
1654 /* prevent core from generating interrupts until recovery */
1655 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1656 reg |= DWC3_GCTL_CORESOFTRESET;
1657 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1658
1659 /* restart USB which performs full reset and reconnect */
1660 schedule_work(&mdwc->restart_usb_work);
1661 break;
1662 case DWC3_CONTROLLER_RESET_EVENT:
1663 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1664 /* HS & SSPHYs get reset as part of core soft reset */
1665 dwc3_msm_qscratch_reg_init(mdwc);
1666 break;
1667 case DWC3_CONTROLLER_POST_RESET_EVENT:
1668 dev_dbg(mdwc->dev,
1669 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1670
		/*
		 * The sequence below is used when the controller operates
		 * without an SS PHY and only USB High Speed is supported.
		 */
1675 if (dwc->maximum_speed == USB_SPEED_HIGH) {
1676 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1677 dwc3_msm_read_reg(mdwc->base,
1678 QSCRATCH_GENERAL_CFG)
1679 | PIPE_UTMI_CLK_DIS);
1680
1681 usleep_range(2, 5);
1682
1683
1684 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1685 dwc3_msm_read_reg(mdwc->base,
1686 QSCRATCH_GENERAL_CFG)
1687 | PIPE_UTMI_CLK_SEL
1688 | PIPE3_PHYSTATUS_SW);
1689
1690 usleep_range(2, 5);
1691
1692 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1693 dwc3_msm_read_reg(mdwc->base,
1694 QSCRATCH_GENERAL_CFG)
1695 & ~PIPE_UTMI_CLK_DIS);
1696 }
1697
1698 dwc3_msm_update_ref_clk(mdwc);
1699 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1700 break;
1701 case DWC3_CONTROLLER_CONNDONE_EVENT:
1702 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1703 /*
1704 * Add power event if the dbm indicates coming out of L1 by
1705 * interrupt
1706 */
1707 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1708 dwc3_msm_write_reg_field(mdwc->base,
1709 PWR_EVNT_IRQ_MASK_REG,
1710 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1711
1712 atomic_set(&dwc->in_lpm, 0);
1713 break;
1714 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1715 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1716 if (dwc->enable_bus_suspend) {
1717 mdwc->suspend = dwc->b_suspend;
1718 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1719 }
1720 break;
1721 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1722 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001723 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001724 break;
1725 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1726 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001727 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001728 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001729 case DWC3_GSI_EVT_BUF_ALLOC:
1730 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1731
1732 if (!mdwc->num_gsi_event_buffers)
1733 break;
1734
1735 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1736 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1737 GFP_KERNEL);
1738 if (!mdwc->gsi_ev_buff) {
1739 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1740 break;
1741 }
1742
1743 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1744
1745 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1746 if (!evt)
1747 break;
1748 evt->dwc = dwc;
1749 evt->length = DWC3_EVENT_BUFFERS_SIZE;
1750 evt->buf = dma_alloc_coherent(dwc->dev,
1751 DWC3_EVENT_BUFFERS_SIZE,
1752 &evt->dma, GFP_KERNEL);
1753 if (!evt->buf) {
1754 dev_err(dwc->dev,
1755 "can't allocate gsi_evt_buf(%d)\n", i);
1756 break;
1757 }
1758 mdwc->gsi_ev_buff[i] = evt;
1759 }
1760 break;
1761 case DWC3_GSI_EVT_BUF_SETUP:
1762 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1763 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1764 evt = mdwc->gsi_ev_buff[i];
1765 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1766 evt->buf, (unsigned long long) evt->dma,
1767 evt->length);
1768 memset(evt->buf, 0, evt->length);
1769 evt->lpos = 0;
1770 /*
1771 * Primary event buffer is programmed with registers
1772 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1773 * program USB GSI related event buffer with DWC3
1774 * controller.
1775 */
1776 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1777 lower_32_bits(evt->dma));
1778 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1779 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1780 DWC3_GEVENT_TYPE_GSI) |
1781 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1782 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1783 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1784 ((evt->length) & 0xffff));
1785 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1786 }
1787 break;
1788 case DWC3_GSI_EVT_BUF_CLEANUP:
1789 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
1790 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1791 evt = mdwc->gsi_ev_buff[i];
1792 evt->lpos = 0;
1793 /*
1794 * Primary event buffer is programmed with registers
1795 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1796 * program USB GSI related event buffer with DWC3
1797 * controller.
1798 */
1799 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1800 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1801 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1802 DWC3_GEVNTSIZ_INTMASK |
1803 DWC3_GEVNTSIZ_SIZE((i+1)));
1804 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1805 }
1806 break;
1807 case DWC3_GSI_EVT_BUF_FREE:
1808 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
1809 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1810 evt = mdwc->gsi_ev_buff[i];
1811 if (evt)
1812 dma_free_coherent(dwc->dev, evt->length,
1813 evt->buf, evt->dma);
1814 }
1815 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001816 default:
1817 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1818 break;
1819 }
1820}
1821
1822static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1823{
1824 int ret = 0;
1825
1826 if (core_reset) {
1827 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1828 if (ret)
1829 return;
1830
1831 usleep_range(1000, 1200);
1832 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1833 if (ret)
1834 return;
1835
1836 usleep_range(10000, 12000);
1837 }
1838
1839 if (mdwc->dbm) {
1840 /* Reset the DBM */
1841 dbm_soft_reset(mdwc->dbm, 1);
1842 usleep_range(1000, 1200);
1843 dbm_soft_reset(mdwc->dbm, 0);
1844
1845		/* Enable DBM */
1846 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1847 DBM_EN_MASK, 0x1);
1848 dbm_enable(mdwc->dbm);
1849 }
1850}
1851
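/*
 * Re-initialize the controller after a power collapse: program the AHB2PHY
 * bridge for one-wait-state reads/writes, run the one-time core pre-init on
 * first use, then redo dwc3_core_init() and re-program the event buffers,
 * since this register state is lost while the GDSC is turned off.
 */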
1852static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1853{
1854 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1855 u32 val;
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301856 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001857
1858 /* Configure AHB2PHY for one wait state read/write */
1859 if (mdwc->ahb2phy_base) {
1860 clk_prepare_enable(mdwc->cfg_ahb_clk);
1861 val = readl_relaxed(mdwc->ahb2phy_base +
1862 PERIPH_SS_AHB2PHY_TOP_CFG);
1863 if (val != ONE_READ_WRITE_WAIT) {
1864 writel_relaxed(ONE_READ_WRITE_WAIT,
1865 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1866 /* complete above write before configuring USB PHY. */
1867 mb();
1868 }
1869 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1870 }
1871
1872 if (!mdwc->init) {
Mayank Rana08e41922017-03-02 15:25:48 -08001873 dbg_event(0xFF, "dwc3 init",
1874 atomic_read(&mdwc->dev->power.usage_count));
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301875 ret = dwc3_core_pre_init(dwc);
1876 if (ret) {
1877 dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
1878 return;
1879 }
Mayank Rana511f3b22016-08-02 12:00:11 -07001880 mdwc->init = true;
1881 }
1882
1883 dwc3_core_init(dwc);
1884 /* Re-configure event buffers */
1885 dwc3_event_buffers_setup(dwc);
1886}
1887
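/*
 * Prepare the controller/PHY for low power mode: verify the link is in P3
 * when operating at SuperSpeed, clear stale L2 events, set ENBLSLPM/SUSPHY in
 * GUSB2PHYCFG, then poll PWR_EVNT_IRQ_STAT_REG for up to ~5 ms waiting for
 * the HS PHY to enter L2. Returns -EBUSY if the LPM preconditions aren't met.
 */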
1888static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
1889{
1890 unsigned long timeout;
1891 u32 reg = 0;
1892
1893 if ((mdwc->in_host_mode || mdwc->vbus_active)
Vijayavardhan Vennapusa8cf91a62016-09-01 12:05:50 +05301894 && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001895 if (!atomic_read(&mdwc->in_p3)) {
1896			dev_err(mdwc->dev, "Not in P3, aborting LPM sequence\n");
1897 return -EBUSY;
1898 }
1899 }
1900
1901 /* Clear previous L2 events */
1902 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
1903 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
1904
1905 /* Prepare HSPHY for suspend */
1906 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
1907 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
1908 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
1909
1910 /* Wait for PHY to go into L2 */
1911 timeout = jiffies + msecs_to_jiffies(5);
1912 while (!time_after(jiffies, timeout)) {
1913 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
1914 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
1915 break;
1916 }
1917 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
1918 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
1919
1920 /* Clear L2 event bit */
1921 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
1922 PWR_EVNT_LPM_IN_L2_MASK);
1923
1924 return 0;
1925}
1926
1927static void dwc3_msm_bus_vote_w(struct work_struct *w)
1928{
1929 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1930 int ret;
1931
1932 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1933 mdwc->bus_vote);
1934 if (ret)
1935 dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
1936}
1937
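/*
 * Record the active connection speed in the HS PHY flags (PHY_HSFS_MODE or
 * PHY_LS_MODE). In host mode this walks each root-hub port's PORTSC; in
 * device mode it uses the gadget's negotiated speed. The presumed intent is
 * that the PHY driver picks the matching linestate/wakeup configuration while
 * suspended; the flag consumers live in the PHY drivers.
 */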
1938static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
1939{
1940 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1941 int i, num_ports;
1942 u32 reg;
1943
1944 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
1945 if (mdwc->in_host_mode) {
1946 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
1947 num_ports = HCS_MAX_PORTS(reg);
1948 for (i = 0; i < num_ports; i++) {
1949 reg = dwc3_msm_read_reg(mdwc->base,
1950 USB3_PORTSC + i*0x10);
1951 if (reg & PORT_PE) {
1952 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
1953 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1954 else if (DEV_LOWSPEED(reg))
1955 mdwc->hs_phy->flags |= PHY_LS_MODE;
1956 }
1957 }
1958 } else {
1959 if (dwc->gadget.speed == USB_SPEED_HIGH ||
1960 dwc->gadget.speed == USB_SPEED_FULL)
1961 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1962 else if (dwc->gadget.speed == USB_SPEED_LOW)
1963 mdwc->hs_phy->flags |= PHY_LS_MODE;
1964 }
1965}
1966
1967
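/*
 * Low power mode entry. The ordering below matters: abort if device events
 * are pending or the OTG state machine is still settling, quiesce the link
 * via dwc3_msm_prepare_suspend(), disable the core and power-event IRQs,
 * suspend the HS (and optionally SS) PHY, gate the clocks (core_clk dropped
 * to 19.2 MHz first), optionally power-collapse the controller, drop the bus
 * bandwidth vote, and finally arm the PHY IRQs as wakeup sources when a cable
 * is still attached.
 */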
1968static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
1969{
Mayank Rana83ad5822016-08-09 14:17:22 -07001970 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001971 bool can_suspend_ssphy;
1972 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana83ad5822016-08-09 14:17:22 -07001973 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001974
1975 if (atomic_read(&dwc->in_lpm)) {
1976 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
1977 return 0;
1978 }
1979
1980 if (!mdwc->in_host_mode) {
Mayank Rana83ad5822016-08-09 14:17:22 -07001981 evt = dwc->ev_buf;
1982 if ((evt->flags & DWC3_EVENT_PENDING)) {
1983 dev_dbg(mdwc->dev,
Mayank Rana511f3b22016-08-02 12:00:11 -07001984 "%s: %d device events pending, abort suspend\n",
1985 __func__, evt->count / 4);
Mayank Rana83ad5822016-08-09 14:17:22 -07001986 return -EBUSY;
Mayank Rana511f3b22016-08-02 12:00:11 -07001987 }
1988 }
1989
1990 if (!mdwc->vbus_active && dwc->is_drd &&
1991 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
1992 /*
1993 * In some cases, the pm_runtime_suspend may be called by
1994 * usb_bam when there is pending lpm flag. However, if this is
1995 * done when cable was disconnected and otg state has not
1996 * yet changed to IDLE, then it means OTG state machine
1997 * is running and we race against it. So cancel LPM for now,
1998 * and OTG state machine will go for LPM later, after completing
1999 * transition to IDLE state.
2000 */
2001 dev_dbg(mdwc->dev,
2002 "%s: cable disconnected while not in idle otg state\n",
2003 __func__);
2004 return -EBUSY;
2005 }
2006
2007 /*
2008	 * For the device bus suspend case, check the gadget state: if it has
2009	 * not reached CONFIGURED, break the LPM sequence instead of proceeding
2010	 * with the controller L2 handling.
2011 */
2012 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
2013 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
2014 pr_err("%s(): Trying to go in LPM with state:%d\n",
2015 __func__, dwc->gadget.state);
2016 pr_err("%s(): LPM is not performed.\n", __func__);
2017 return -EBUSY;
2018 }
2019
2020 ret = dwc3_msm_prepare_suspend(mdwc);
2021 if (ret)
2022 return ret;
2023
2024 /* Initialize variables here */
2025 can_suspend_ssphy = !(mdwc->in_host_mode &&
2026 dwc3_msm_is_host_superspeed(mdwc));
2027
2028 /* Disable core irq */
2029 if (dwc->irq)
2030 disable_irq(dwc->irq);
2031
2032 /* disable power event irq, hs and ss phy irq is used as wake up src */
2033 disable_irq(mdwc->pwr_event_irq);
2034
2035 dwc3_set_phy_speed_flags(mdwc);
2036 /* Suspend HS PHY */
2037 usb_phy_set_suspend(mdwc->hs_phy, 1);
2038
2039 /* Suspend SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002040 if (dwc->maximum_speed == USB_SPEED_SUPER && can_suspend_ssphy) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002041		/* indicate SS mode to the PHY */
2042 if (dwc3_msm_is_superspeed(mdwc))
2043 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
2044 usb_phy_set_suspend(mdwc->ss_phy, 1);
2045 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
2046 }
2047
2048 /* make sure above writes are completed before turning off clocks */
2049 wmb();
2050
2051 /* Disable clocks */
2052 if (mdwc->bus_aggr_clk)
2053 clk_disable_unprepare(mdwc->bus_aggr_clk);
2054 clk_disable_unprepare(mdwc->utmi_clk);
2055
Hemant Kumar633dc332016-08-10 13:41:05 -07002056 /* Memory core: OFF, Memory periphery: OFF */
2057 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2058 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
2059 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
2060 }
2061
Mayank Rana511f3b22016-08-02 12:00:11 -07002062 clk_set_rate(mdwc->core_clk, 19200000);
2063 clk_disable_unprepare(mdwc->core_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302064 if (mdwc->noc_aggr_clk)
2065 clk_disable_unprepare(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07002066 /*
2067 * Disable iface_clk only after core_clk as core_clk has FSM
2068	 * dependency on iface_clk. Hence iface_clk should be turned off
2069 * after core_clk is turned off.
2070 */
2071 clk_disable_unprepare(mdwc->iface_clk);
2072	/* USB PHY no longer requires TCXO */
2073 clk_disable_unprepare(mdwc->xo_clk);
2074
2075 /* Perform controller power collapse */
2076 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2077 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
2078 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
2079 dwc3_msm_config_gdsc(mdwc, 0);
2080 clk_disable_unprepare(mdwc->sleep_clk);
2081 }
2082
2083 /* Remove bus voting */
2084 if (mdwc->bus_perf_client) {
2085 mdwc->bus_vote = 0;
2086 schedule_work(&mdwc->bus_vote_w);
2087 }
2088
2089 /*
2090	 * Release the wakeup source with a timeout to defer system suspend,
2091	 * handling the case where both SUSPEND and DISCONNECT events are
2092	 * received on USB cable disconnect.
2093 */
2094 if (mdwc->lpm_to_suspend_delay) {
2095 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
2096 mdwc->lpm_to_suspend_delay);
2097 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
2098 } else {
2099 pm_relax(mdwc->dev);
2100 }
2101
2102 atomic_set(&dwc->in_lpm, 1);
2103
2104 /*
2105	 * With DCP or during cable disconnect, wakeup via HS_PHY_IRQ or
2106	 * SS_PHY_IRQ is not required. Hence enable wakeup only for the
2107	 * host bus suspend and device bus suspend cases.
2108 */
2109 if (mdwc->vbus_active || mdwc->in_host_mode) {
2110 enable_irq_wake(mdwc->hs_phy_irq);
2111 enable_irq(mdwc->hs_phy_irq);
2112 if (mdwc->ss_phy_irq) {
2113 enable_irq_wake(mdwc->ss_phy_irq);
2114 enable_irq(mdwc->ss_phy_irq);
2115 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002116 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2117 }
2118
2119 dev_info(mdwc->dev, "DWC3 in low power mode\n");
2120 return 0;
2121}
2122
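/*
 * Low power mode exit; essentially the reverse of dwc3_msm_suspend(): restore
 * the bus vote and TCXO, un-collapse the GDSC (with a core reset cycle),
 * re-enable clocks in iface -> core -> utmi order, resume the PHYs, redo the
 * power-on-reset programming if the controller was power collapsed, and
 * finally re-enable the power-event and core IRQs.
 */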
2123static int dwc3_msm_resume(struct dwc3_msm *mdwc)
2124{
2125 int ret;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002126 long core_clk_rate;
Mayank Rana511f3b22016-08-02 12:00:11 -07002127 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2128
2129 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
2130
2131 if (!atomic_read(&dwc->in_lpm)) {
2132 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
2133 return 0;
2134 }
2135
2136 pm_stay_awake(mdwc->dev);
2137
2138 /* Enable bus voting */
2139 if (mdwc->bus_perf_client) {
2140 mdwc->bus_vote = 1;
2141 schedule_work(&mdwc->bus_vote_w);
2142 }
2143
2144 /* Vote for TCXO while waking up USB HSPHY */
2145 ret = clk_prepare_enable(mdwc->xo_clk);
2146 if (ret)
2147 dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
2148 __func__, ret);
2149
2150 /* Restore controller power collapse */
2151 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2152 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2153 dwc3_msm_config_gdsc(mdwc, 1);
Amit Nischal4d278212016-06-06 17:54:34 +05302154 ret = reset_control_assert(mdwc->core_reset);
2155 if (ret)
2156 dev_err(mdwc->dev, "%s:core_reset assert failed\n",
2157 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002158 /* HW requires a short delay for reset to take place properly */
2159 usleep_range(1000, 1200);
Amit Nischal4d278212016-06-06 17:54:34 +05302160 ret = reset_control_deassert(mdwc->core_reset);
2161 if (ret)
2162 dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
2163 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002164 clk_prepare_enable(mdwc->sleep_clk);
2165 }
2166
2167 /*
2168 * Enable clocks
2169	 * Turn ON iface_clk before core_clk due to the FSM dependency.
2170 */
2171 clk_prepare_enable(mdwc->iface_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302172 if (mdwc->noc_aggr_clk)
2173 clk_prepare_enable(mdwc->noc_aggr_clk);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002174
2175 core_clk_rate = mdwc->core_clk_rate;
2176 if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
2177 core_clk_rate = mdwc->core_clk_rate_hs;
2178 dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
2179 core_clk_rate);
2180 }
2181
2182 clk_set_rate(mdwc->core_clk, core_clk_rate);
Mayank Rana511f3b22016-08-02 12:00:11 -07002183 clk_prepare_enable(mdwc->core_clk);
Hemant Kumar5fa38932016-10-27 11:58:37 -07002184
2185 /* set Memory core: ON, Memory periphery: ON */
2186 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
2187 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
2188
Mayank Rana511f3b22016-08-02 12:00:11 -07002189 clk_prepare_enable(mdwc->utmi_clk);
2190 if (mdwc->bus_aggr_clk)
2191 clk_prepare_enable(mdwc->bus_aggr_clk);
2192
2193 /* Resume SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002194 if (dwc->maximum_speed == USB_SPEED_SUPER &&
2195 mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002196 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2197 if (mdwc->typec_orientation == ORIENTATION_CC1)
2198 mdwc->ss_phy->flags |= PHY_LANE_A;
2199 if (mdwc->typec_orientation == ORIENTATION_CC2)
2200 mdwc->ss_phy->flags |= PHY_LANE_B;
2201 usb_phy_set_suspend(mdwc->ss_phy, 0);
2202 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2203 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2204 }
2205
2206 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2207 /* Resume HS PHY */
2208 usb_phy_set_suspend(mdwc->hs_phy, 0);
2209
2210 /* Recover from controller power collapse */
2211 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2212 u32 tmp;
2213
2214 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2215
2216 dwc3_msm_power_collapse_por(mdwc);
2217
2218 /* Get initial P3 status and enable IN_P3 event */
2219 tmp = dwc3_msm_read_reg_field(mdwc->base,
2220 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2221 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2222 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2223 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2224
2225 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
2226 }
2227
2228 atomic_set(&dwc->in_lpm, 0);
2229
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302230 /* enable power evt irq for IN P3 detection */
2231 enable_irq(mdwc->pwr_event_irq);
2232
Mayank Rana511f3b22016-08-02 12:00:11 -07002233 /* Disable HSPHY auto suspend */
2234 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2235 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2236 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2237 DWC3_GUSB2PHYCFG_SUSPHY));
2238
2239 /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
2240 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
2241 disable_irq_wake(mdwc->hs_phy_irq);
2242 disable_irq_nosync(mdwc->hs_phy_irq);
2243 if (mdwc->ss_phy_irq) {
2244 disable_irq_wake(mdwc->ss_phy_irq);
2245 disable_irq_nosync(mdwc->ss_phy_irq);
2246 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002247 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2248 }
2249
2250 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2251
Mayank Rana511f3b22016-08-02 12:00:11 -07002252 /* Enable core irq */
2253 if (dwc->irq)
2254 enable_irq(dwc->irq);
2255
2256 /*
2257 * Handle other power events that could not have been handled during
2258 * Low Power Mode
2259 */
2260 dwc3_pwr_event_handler(mdwc);
2261
Mayank Rana08e41922017-03-02 15:25:48 -08002262 dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002263 return 0;
2264}
2265
2266/**
2267 * dwc3_ext_event_notify - callback to handle events from external transceiver
2268 *
2269 * Returns 0 on success
2270 */
2271static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2272{
2273 /* Flush processing any pending events before handling new ones */
2274 flush_delayed_work(&mdwc->sm_work);
2275
2276 if (mdwc->id_state == DWC3_ID_FLOAT) {
2277 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2278 set_bit(ID, &mdwc->inputs);
2279 } else {
2280 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2281 clear_bit(ID, &mdwc->inputs);
2282 }
2283
2284 if (mdwc->vbus_active && !mdwc->in_restart) {
2285 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2286 set_bit(B_SESS_VLD, &mdwc->inputs);
2287 } else {
2288 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2289 clear_bit(B_SESS_VLD, &mdwc->inputs);
2290 }
2291
2292 if (mdwc->suspend) {
2293 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2294 set_bit(B_SUSPEND, &mdwc->inputs);
2295 } else {
2296 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2297 clear_bit(B_SUSPEND, &mdwc->inputs);
2298 }
2299
2300 schedule_delayed_work(&mdwc->sm_work, 0);
2301}
2302
2303static void dwc3_resume_work(struct work_struct *w)
2304{
2305 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002306 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002307
2308 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2309
2310 /*
2311 * exit LPM first to meet resume timeline from device side.
2312 * resume_pending flag would prevent calling
2313 * dwc3_msm_resume() in case we are here due to system
2314 * wide resume without usb cable connected. This flag is set
2315 * only in case of power event irq in lpm.
2316 */
2317 if (mdwc->resume_pending) {
2318 dwc3_msm_resume(mdwc);
2319 mdwc->resume_pending = false;
2320 }
2321
Mayank Rana08e41922017-03-02 15:25:48 -08002322 if (atomic_read(&mdwc->pm_suspended)) {
2323 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002324 /* let pm resume kick in resume work later */
2325 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002326 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002327 dwc3_ext_event_notify(mdwc);
2328}
2329
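/*
 * Decode and acknowledge PWR_EVNT_IRQ_STAT_REG: P3 entry/exit updates the
 * cached in_p3 state (consulting GDBGLTSSM when both bits are set), L1 exit
 * triggers a remote-wakeup request towards the gadget, and every handled bit
 * is accumulated in irq_clear and written back to clear the status register.
 */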
2330static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2331{
2332 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2333 u32 irq_stat, irq_clear = 0;
2334
2335 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2336 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2337
2338 /* Check for P3 events */
2339 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2340 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2341 /* Can't tell if entered or exit P3, so check LINKSTATE */
2342 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2343 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2344 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2345 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2346
2347 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2348 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2349 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2350 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2351 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2352 atomic_set(&mdwc->in_p3, 0);
2353 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2354 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2355 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2356 atomic_set(&mdwc->in_p3, 1);
2357 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2358 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2359 }
2360
2361 /* Clear L2 exit */
2362 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2363 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2364		irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
2365 }
2366
2367 /* Handle exit from L1 events */
2368 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2369 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2370 __func__);
2371 if (usb_gadget_wakeup(&dwc->gadget))
2372 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2373 __func__);
2374 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2375 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2376 }
2377
2378 /* Unhandled events */
2379 if (irq_stat)
2380 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2381 __func__, irq_stat);
2382
2383 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2384}
2385
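/*
 * Threaded half of the power-event interrupt: if the controller is still in
 * LPM, run the (possibly sleeping) resume path from here; otherwise handle
 * the pending power events directly.
 */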
2386static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2387{
2388 struct dwc3_msm *mdwc = _mdwc;
2389 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2390
2391 dev_dbg(mdwc->dev, "%s\n", __func__);
2392
2393 if (atomic_read(&dwc->in_lpm))
2394 dwc3_resume_work(&mdwc->resume_work);
2395 else
2396 dwc3_pwr_event_handler(mdwc);
2397
Mayank Rana08e41922017-03-02 15:25:48 -08002398 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002399 return IRQ_HANDLED;
2400}
2401
2402static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2403{
2404 struct dwc3_msm *mdwc = data;
2405 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2406
2407 dwc->t_pwr_evt_irq = ktime_get();
2408 dev_dbg(mdwc->dev, "%s received\n", __func__);
2409 /*
2410	 * When in Low Power Mode, PWR_EVNT_IRQ_STAT_REG can't be read to ascertain
2411	 * which interrupts have been triggered, as the clocks are disabled.
2412	 * Resume the controller by waking up the pwr event irq thread. After re-enabling
2413 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2414 * all other power events.
2415 */
2416 if (atomic_read(&dwc->in_lpm)) {
2417 /* set this to call dwc3_msm_resume() */
2418 mdwc->resume_pending = true;
2419 return IRQ_WAKE_THREAD;
2420 }
2421
2422 dwc3_pwr_event_handler(mdwc);
2423 return IRQ_HANDLED;
2424}
2425
2426static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2427 unsigned long action, void *hcpu)
2428{
2429 uint32_t cpu = (uintptr_t)hcpu;
2430 struct dwc3_msm *mdwc =
2431 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2432
2433 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2434 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2435 cpu_to_affin, mdwc->irq_to_affin);
2436 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2437 }
2438
2439 return NOTIFY_OK;
2440}
2441
2442static void dwc3_otg_sm_work(struct work_struct *w);
2443
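/*
 * Acquire the USB GDSC regulator and all clocks from DT and set their initial
 * rates: xo at 19.2 MHz, core_clk rounded from qcom,core-clk-rate (with an
 * optional HS-only rate), sleep_clk at 32 kHz and utmi_clk at 19.2 MHz.
 * bus_aggr_clk, noc_aggr_clk and cfg_ahb_clk are optional and simply left
 * NULL when not described.
 */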
2444static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2445{
2446 int ret;
2447
2448 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2449 if (IS_ERR(mdwc->dwc3_gdsc))
2450 mdwc->dwc3_gdsc = NULL;
2451
2452 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2453 if (IS_ERR(mdwc->xo_clk)) {
2454 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2455 __func__);
2456 ret = PTR_ERR(mdwc->xo_clk);
2457 return ret;
2458 }
2459 clk_set_rate(mdwc->xo_clk, 19200000);
2460
2461 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2462 if (IS_ERR(mdwc->iface_clk)) {
2463 dev_err(mdwc->dev, "failed to get iface_clk\n");
2464 ret = PTR_ERR(mdwc->iface_clk);
2465 return ret;
2466 }
2467
2468 /*
2469	 * The DWC3 core requires its CORE CLK (aka master/bus clk) to
2470	 * run at 125 MHz in SSUSB mode and above 60 MHz in HSUSB mode.
2471	 * On newer platforms it can run at 150 MHz as well.
2472 */
2473 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2474 if (IS_ERR(mdwc->core_clk)) {
2475 dev_err(mdwc->dev, "failed to get core_clk\n");
2476 ret = PTR_ERR(mdwc->core_clk);
2477 return ret;
2478 }
2479
Amit Nischal4d278212016-06-06 17:54:34 +05302480 mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
2481 if (IS_ERR(mdwc->core_reset)) {
2482 dev_err(mdwc->dev, "failed to get core_reset\n");
2483 return PTR_ERR(mdwc->core_reset);
2484 }
2485
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302486 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302487 (u32 *)&mdwc->core_clk_rate)) {
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302488 dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
2489 return -EINVAL;
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302490 }
2491
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302492 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302493 mdwc->core_clk_rate);
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302494 dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
2495 mdwc->core_clk_rate);
2496 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2497 if (ret)
2498 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002499
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002500 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
2501 (u32 *)&mdwc->core_clk_rate_hs)) {
2502 dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
2503 mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
2504 }
2505
Mayank Rana511f3b22016-08-02 12:00:11 -07002506 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2507 if (IS_ERR(mdwc->sleep_clk)) {
2508 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2509 ret = PTR_ERR(mdwc->sleep_clk);
2510 return ret;
2511 }
2512
2513 clk_set_rate(mdwc->sleep_clk, 32000);
2514 mdwc->utmi_clk_rate = 19200000;
2515 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2516 if (IS_ERR(mdwc->utmi_clk)) {
2517 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2518 ret = PTR_ERR(mdwc->utmi_clk);
2519 return ret;
2520 }
2521
2522 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2523 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2524 if (IS_ERR(mdwc->bus_aggr_clk))
2525 mdwc->bus_aggr_clk = NULL;
2526
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302527 mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
2528 if (IS_ERR(mdwc->noc_aggr_clk))
2529 mdwc->noc_aggr_clk = NULL;
2530
Mayank Rana511f3b22016-08-02 12:00:11 -07002531 if (of_property_match_string(mdwc->dev->of_node,
2532 "clock-names", "cfg_ahb_clk") >= 0) {
2533 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2534 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2535 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2536 mdwc->cfg_ahb_clk = NULL;
2537 if (ret != -EPROBE_DEFER)
2538 dev_err(mdwc->dev,
2539 "failed to get cfg_ahb_clk ret %d\n",
2540 ret);
2541 return ret;
2542 }
2543 }
2544
2545 return 0;
2546}
2547
2548static int dwc3_msm_id_notifier(struct notifier_block *nb,
2549 unsigned long event, void *ptr)
2550{
2551 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002552 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002553 struct extcon_dev *edev = ptr;
2554 enum dwc3_id_state id;
2555 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002556 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002557
2558 if (!edev) {
2559 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2560 goto done;
2561 }
2562
2563 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2564
2565 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2566
2567 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2568 if (cc_state < 0)
2569 mdwc->typec_orientation = ORIENTATION_NONE;
2570 else
2571 mdwc->typec_orientation =
2572 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2573
Mayank Rana08e41922017-03-02 15:25:48 -08002574 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
Hemant Kumarde1df692016-04-26 19:36:48 -07002575
2576 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
2577 dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
Vamsi Krishna Samavedam86ed20b2017-01-31 13:55:38 -08002578 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2579 dwc->maximum_speed = dwc->max_hw_supp_speed;
Hemant Kumarde1df692016-04-26 19:36:48 -07002580
Mayank Rana511f3b22016-08-02 12:00:11 -07002581 if (mdwc->id_state != id) {
2582 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002583 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002584 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2585 }
2586
2587done:
2588 return NOTIFY_DONE;
2589}
2590
2591static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2592 unsigned long event, void *ptr)
2593{
2594 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2595 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2596 struct extcon_dev *edev = ptr;
2597 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002598 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002599
2600 if (!edev) {
2601 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2602 goto done;
2603 }
2604
2605 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2606
2607 if (mdwc->vbus_active == event)
2608 return NOTIFY_DONE;
2609
2610 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2611 if (cc_state < 0)
2612 mdwc->typec_orientation = ORIENTATION_NONE;
2613 else
2614 mdwc->typec_orientation =
2615 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2616
Mayank Rana08e41922017-03-02 15:25:48 -08002617 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
Hemant Kumarde1df692016-04-26 19:36:48 -07002618
2619 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
2620 dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
Vamsi Krishna Samavedam86ed20b2017-01-31 13:55:38 -08002621 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2622 dwc->maximum_speed = dwc->max_hw_supp_speed;
Hemant Kumarde1df692016-04-26 19:36:48 -07002623
Mayank Rana511f3b22016-08-02 12:00:11 -07002624 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002625 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002626 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002627done:
2628 return NOTIFY_DONE;
2629}
2630
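/*
 * Register for extcon notifications when an "extcon" phandle list is present:
 * the first phandle supplies VBUS (EXTCON_USB) events and, if a second
 * phandle is given, it supplies ID (EXTCON_USB_HOST) events; otherwise the
 * same edev is reused for both.
 */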
2631static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
2632{
2633 struct device_node *node = mdwc->dev->of_node;
2634 struct extcon_dev *edev;
2635 int ret = 0;
2636
2637 if (!of_property_read_bool(node, "extcon"))
2638 return 0;
2639
2640 edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
2641 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2642 return PTR_ERR(edev);
2643
2644 if (!IS_ERR(edev)) {
2645 mdwc->extcon_vbus = edev;
2646 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2647 ret = extcon_register_notifier(edev, EXTCON_USB,
2648 &mdwc->vbus_nb);
2649 if (ret < 0) {
2650 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2651 return ret;
2652 }
2653 }
2654
2655 /* if a second phandle was provided, use it to get a separate edev */
2656 if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
2657 edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
2658 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2659 ret = PTR_ERR(edev);
2660 goto err;
2661 }
2662 }
2663
2664 if (!IS_ERR(edev)) {
2665 mdwc->extcon_id = edev;
2666 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
2667 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2668 &mdwc->id_nb);
2669 if (ret < 0) {
2670 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2671 goto err;
2672 }
2673 }
2674
2675 return 0;
2676err:
2677 if (mdwc->extcon_vbus)
2678 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
2679 &mdwc->vbus_nb);
2680 return ret;
2681}
2682
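/*
 * "mode" sysfs attribute: reads back "peripheral", "host" or "none" from the
 * cached VBUS/ID state, and lets user space force a role, e.g. (path shown is
 * illustrative; the file sits in this platform device's sysfs directory):
 *   echo host > /sys/bus/platform/devices/<dwc3-msm>/mode
 * Any other string clears both VBUS and ID, i.e. selects "none".
 */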
2683static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2684 char *buf)
2685{
2686 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2687
2688 if (mdwc->vbus_active)
2689 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2690 if (mdwc->id_state == DWC3_ID_GROUND)
2691 return snprintf(buf, PAGE_SIZE, "host\n");
2692
2693 return snprintf(buf, PAGE_SIZE, "none\n");
2694}
2695
2696static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2697 const char *buf, size_t count)
2698{
2699 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2700
2701 if (sysfs_streq(buf, "peripheral")) {
2702 mdwc->vbus_active = true;
2703 mdwc->id_state = DWC3_ID_FLOAT;
2704 } else if (sysfs_streq(buf, "host")) {
2705 mdwc->vbus_active = false;
2706 mdwc->id_state = DWC3_ID_GROUND;
2707 } else {
2708 mdwc->vbus_active = false;
2709 mdwc->id_state = DWC3_ID_FLOAT;
2710 }
2711
2712 dwc3_ext_event_notify(mdwc);
2713
2714 return count;
2715}
2716
2717static DEVICE_ATTR_RW(mode);
2718
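/*
 * "speed" sysfs attribute: shows the maximum speed the hardware supports;
 * writing "high" or "super" clamps both max_hw_supp_speed and maximum_speed
 * and schedules restart_usb_work so the new limit takes effect on the next
 * connect.
 */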
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08002719static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
2720 char *buf)
2721{
2722 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2723 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2724
2725 return snprintf(buf, PAGE_SIZE, "%s\n",
2726 usb_speed_string(dwc->max_hw_supp_speed));
2727}
2728
2729static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
2730 const char *buf, size_t count)
2731{
2732 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2733 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2734 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
2735
2736 if (sysfs_streq(buf, "high"))
2737 req_speed = USB_SPEED_HIGH;
2738 else if (sysfs_streq(buf, "super"))
2739 req_speed = USB_SPEED_SUPER;
2740
2741 if (req_speed != USB_SPEED_UNKNOWN &&
2742 req_speed != dwc->max_hw_supp_speed) {
2743 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
2744 schedule_work(&mdwc->restart_usb_work);
2745 }
2746
2747 return count;
2748}
2749static DEVICE_ATTR_RW(speed);
2750
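/*
 * Probe sequence: allocate the glue structure, set a 64-bit (falling back to
 * 32-bit) DMA mask, initialize the work items and ordered workqueue, fetch
 * clocks/GDSC, request the hs/ss/pwr-event IRQs (left disabled until resume),
 * map the register spaces, populate the dwc3 core child from DT, look up the
 * PHYs and bus-scale/extcon handles, create the mode/speed sysfs files and
 * kick the OTG state machine. Runtime PM starts in the "suspended" state so
 * clocks stay off until the first cable event.
 */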
Mayank Rana511f3b22016-08-02 12:00:11 -07002751static int dwc3_msm_probe(struct platform_device *pdev)
2752{
2753 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2754 struct device *dev = &pdev->dev;
2755 struct dwc3_msm *mdwc;
2756 struct dwc3 *dwc;
2757 struct resource *res;
2758 void __iomem *tcsr;
2759 bool host_mode;
2760 int ret = 0;
2761 int ext_hub_reset_gpio;
2762 u32 val;
2763
2764 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2765 if (!mdwc)
2766 return -ENOMEM;
2767
2768 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2769 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2770 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2771 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2772 return -EOPNOTSUPP;
2773 }
2774 }
2775
2776 platform_set_drvdata(pdev, mdwc);
2777 mdwc->dev = &pdev->dev;
2778
2779 INIT_LIST_HEAD(&mdwc->req_complete_list);
2780 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2781 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2782 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07002783 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002784 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
2785
2786 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2787 if (!mdwc->dwc3_wq) {
2788 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2789 return -ENOMEM;
2790 }
2791
2792 /* Get all clks and gdsc reference */
2793 ret = dwc3_msm_get_clk_gdsc(mdwc);
2794 if (ret) {
2795 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2796 return ret;
2797 }
2798
2799 mdwc->id_state = DWC3_ID_FLOAT;
2800 set_bit(ID, &mdwc->inputs);
2801
2802 mdwc->charging_disabled = of_property_read_bool(node,
2803 "qcom,charging-disabled");
2804
2805 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2806 &mdwc->lpm_to_suspend_delay);
2807 if (ret) {
2808 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2809 mdwc->lpm_to_suspend_delay = 0;
2810 }
2811
2812 /*
2813 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2814 * DP and DM linestate transitions during low power mode.
2815 */
2816 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2817 if (mdwc->hs_phy_irq < 0) {
2818 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2819 ret = -EINVAL;
2820 goto err;
2821 } else {
2822 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2823 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2824 msm_dwc3_pwr_irq,
2825 msm_dwc3_pwr_irq_thread,
2826 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2827 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2828 if (ret) {
2829 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2830 ret);
2831 goto err;
2832 }
2833 }
2834
2835 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
2836 if (mdwc->ss_phy_irq < 0) {
2837 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
2838 } else {
2839 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
2840 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
2841 msm_dwc3_pwr_irq,
2842 msm_dwc3_pwr_irq_thread,
2843 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2844 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
2845 if (ret) {
2846 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
2847 ret);
2848 goto err;
2849 }
2850 }
2851
2852 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
2853 if (mdwc->pwr_event_irq < 0) {
2854 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
2855 ret = -EINVAL;
2856 goto err;
2857 } else {
2858 /* will be enabled in dwc3_msm_resume() */
2859 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
2860 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
2861 msm_dwc3_pwr_irq,
2862 msm_dwc3_pwr_irq_thread,
2863 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
2864 "msm_dwc3", mdwc);
2865 if (ret) {
2866 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
2867 ret);
2868 goto err;
2869 }
2870 }
2871
2872 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
2873 if (!res) {
2874 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
2875 } else {
2876 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
2877 resource_size(res));
2878 if (IS_ERR_OR_NULL(tcsr)) {
2879 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
2880 } else {
2881 /* Enable USB3 on the primary USB port. */
2882 writel_relaxed(0x1, tcsr);
2883 /*
2884 * Ensure that TCSR write is completed before
2885 * USB registers initialization.
2886 */
2887 mb();
2888 }
2889 }
2890
2891 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
2892 if (!res) {
2893 dev_err(&pdev->dev, "missing memory base resource\n");
2894 ret = -ENODEV;
2895 goto err;
2896 }
2897
2898 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
2899 resource_size(res));
2900 if (!mdwc->base) {
2901 dev_err(&pdev->dev, "ioremap failed\n");
2902 ret = -ENODEV;
2903 goto err;
2904 }
2905
2906 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2907 "ahb2phy_base");
2908 if (res) {
2909 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
2910 res->start, resource_size(res));
2911 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
2912 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
2913 mdwc->ahb2phy_base = NULL;
2914 } else {
2915 /*
2916			 * On some targets cfg_ahb_clk depends upon the USB GDSC
2917			 * regulator. If cfg_ahb_clk is enabled without first
2918			 * turning on the USB GDSC regulator, the clock is stuck off.
2919 */
2920 dwc3_msm_config_gdsc(mdwc, 1);
2921 clk_prepare_enable(mdwc->cfg_ahb_clk);
2922 /* Configure AHB2PHY for one wait state read/write*/
2923 val = readl_relaxed(mdwc->ahb2phy_base +
2924 PERIPH_SS_AHB2PHY_TOP_CFG);
2925 if (val != ONE_READ_WRITE_WAIT) {
2926 writel_relaxed(ONE_READ_WRITE_WAIT,
2927 mdwc->ahb2phy_base +
2928 PERIPH_SS_AHB2PHY_TOP_CFG);
2929 /* complete above write before using USB PHY */
2930 mb();
2931 }
2932 clk_disable_unprepare(mdwc->cfg_ahb_clk);
2933 dwc3_msm_config_gdsc(mdwc, 0);
2934 }
2935 }
2936
2937 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
2938 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
2939 if (IS_ERR(mdwc->dbm)) {
2940 dev_err(&pdev->dev, "unable to get dbm device\n");
2941 ret = -EPROBE_DEFER;
2942 goto err;
2943 }
2944 /*
2945 * Add power event if the dbm indicates coming out of L1
2946 * by interrupt
2947 */
2948 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
2949 if (!mdwc->pwr_event_irq) {
2950 dev_err(&pdev->dev,
2951 "need pwr_event_irq exiting L1\n");
2952 ret = -EINVAL;
2953 goto err;
2954 }
2955 }
2956 }
2957
2958 ext_hub_reset_gpio = of_get_named_gpio(node,
2959 "qcom,ext-hub-reset-gpio", 0);
2960
2961 if (gpio_is_valid(ext_hub_reset_gpio)
2962 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
2963 "qcom,ext-hub-reset-gpio"))) {
2964 /* reset external hub */
2965 gpio_direction_output(ext_hub_reset_gpio, 1);
2966 /*
2967		 * Hub reset should be asserted for a minimum of 5 microseconds
2968		 * before deasserting.
2969 */
2970 usleep_range(5, 1000);
2971 gpio_direction_output(ext_hub_reset_gpio, 0);
2972 }
2973
2974 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
2975 &mdwc->tx_fifo_size))
2976 dev_err(&pdev->dev,
2977 "unable to read platform data tx fifo size\n");
2978
2979 mdwc->disable_host_mode_pm = of_property_read_bool(node,
2980 "qcom,disable-host-mode-pm");
2981
2982 dwc3_set_notifier(&dwc3_msm_notify_event);
2983
2984 /* Assumes dwc3 is the first DT child of dwc3-msm */
2985 dwc3_node = of_get_next_available_child(node, NULL);
2986 if (!dwc3_node) {
2987 dev_err(&pdev->dev, "failed to find dwc3 child\n");
2988 ret = -ENODEV;
2989 goto err;
2990 }
2991
2992 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2993 if (ret) {
2994 dev_err(&pdev->dev,
2995			"failed to create dwc3 core\n");
2996 of_node_put(dwc3_node);
2997 goto err;
2998 }
2999
3000 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3001 of_node_put(dwc3_node);
3002 if (!mdwc->dwc3) {
3003 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
3004 goto put_dwc3;
3005 }
3006
3007 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3008 "usb-phy", 0);
3009 if (IS_ERR(mdwc->hs_phy)) {
3010 dev_err(&pdev->dev, "unable to get hsphy device\n");
3011 ret = PTR_ERR(mdwc->hs_phy);
3012 goto put_dwc3;
3013 }
3014 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3015 "usb-phy", 1);
3016 if (IS_ERR(mdwc->ss_phy)) {
3017 dev_err(&pdev->dev, "unable to get ssphy device\n");
3018 ret = PTR_ERR(mdwc->ss_phy);
3019 goto put_dwc3;
3020 }
3021
3022 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3023 if (mdwc->bus_scale_table) {
3024 mdwc->bus_perf_client =
3025 msm_bus_scale_register_client(mdwc->bus_scale_table);
3026 }
3027
3028 dwc = platform_get_drvdata(mdwc->dwc3);
3029 if (!dwc) {
3030 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
3031 goto put_dwc3;
3032 }
3033
3034 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3035 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3036
3037 if (cpu_to_affin)
3038 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3039
Mayank Ranaf4918d32016-12-15 13:35:55 -08003040 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3041 &mdwc->num_gsi_event_buffers);
3042
Mayank Rana511f3b22016-08-02 12:00:11 -07003043 /*
3044 * Clocks and regulators will not be turned on until the first time
3045 * runtime PM resume is called. This is to allow for booting up with
3046 * charger already connected so as not to disturb PHY line states.
3047 */
3048 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3049 atomic_set(&dwc->in_lpm, 1);
3050 pm_runtime_set_suspended(mdwc->dev);
3051 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3052 pm_runtime_use_autosuspend(mdwc->dev);
3053 pm_runtime_enable(mdwc->dev);
3054 device_init_wakeup(mdwc->dev, 1);
3055
3056 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3057 pm_runtime_get_noresume(mdwc->dev);
3058
3059 ret = dwc3_msm_extcon_register(mdwc);
3060 if (ret)
3061 goto put_dwc3;
3062
3063 /* Update initial VBUS/ID state from extcon */
3064 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
3065 EXTCON_USB))
3066 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
3067 if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
3068 EXTCON_USB_HOST))
3069 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
3070
3071 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003072 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003073
3074 schedule_delayed_work(&mdwc->sm_work, 0);
3075
3076 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3077 if (!dwc->is_drd && host_mode) {
3078 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3079 mdwc->id_state = DWC3_ID_GROUND;
3080 dwc3_ext_event_notify(mdwc);
3081 }
3082
3083 return 0;
3084
3085put_dwc3:
3086 platform_device_put(mdwc->dwc3);
3087 if (mdwc->bus_perf_client)
3088 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3089err:
3090 return ret;
3091}
3092
3093static int dwc3_msm_remove_children(struct device *dev, void *data)
3094{
3095 device_unregister(dev);
3096 return 0;
3097}
3098
3099static int dwc3_msm_remove(struct platform_device *pdev)
3100{
3101 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003102 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003103 int ret_pm;
3104
3105 device_remove_file(&pdev->dev, &dev_attr_mode);
3106
3107 if (cpu_to_affin)
3108 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3109
3110 /*
3111 * In case of system suspend, pm_runtime_get_sync fails.
3112 * Hence turn ON the clocks manually.
3113 */
3114 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003115 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003116 if (ret_pm < 0) {
3117 dev_err(mdwc->dev,
3118 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303119 if (mdwc->noc_aggr_clk)
3120 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003121 clk_prepare_enable(mdwc->utmi_clk);
3122 clk_prepare_enable(mdwc->core_clk);
3123 clk_prepare_enable(mdwc->iface_clk);
3124 clk_prepare_enable(mdwc->sleep_clk);
3125 if (mdwc->bus_aggr_clk)
3126 clk_prepare_enable(mdwc->bus_aggr_clk);
3127 clk_prepare_enable(mdwc->xo_clk);
3128 }
3129
3130 cancel_delayed_work_sync(&mdwc->sm_work);
3131
3132 if (mdwc->hs_phy)
3133 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3134 platform_device_put(mdwc->dwc3);
3135 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
3136
Mayank Rana08e41922017-03-02 15:25:48 -08003137 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003138 pm_runtime_disable(mdwc->dev);
3139 pm_runtime_barrier(mdwc->dev);
3140 pm_runtime_put_sync(mdwc->dev);
3141 pm_runtime_set_suspended(mdwc->dev);
3142 device_wakeup_disable(mdwc->dev);
3143
3144 if (mdwc->bus_perf_client)
3145 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3146
3147 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3148 regulator_disable(mdwc->vbus_reg);
3149
3150 disable_irq(mdwc->hs_phy_irq);
3151 if (mdwc->ss_phy_irq)
3152 disable_irq(mdwc->ss_phy_irq);
3153 disable_irq(mdwc->pwr_event_irq);
3154
3155 clk_disable_unprepare(mdwc->utmi_clk);
3156 clk_set_rate(mdwc->core_clk, 19200000);
3157 clk_disable_unprepare(mdwc->core_clk);
3158 clk_disable_unprepare(mdwc->iface_clk);
3159 clk_disable_unprepare(mdwc->sleep_clk);
3160 clk_disable_unprepare(mdwc->xo_clk);
3161 clk_put(mdwc->xo_clk);
3162
3163 dwc3_msm_config_gdsc(mdwc, 0);
3164
3165 return 0;
3166}
3167
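/*
 * USB device add/remove notifier used in host mode. For devices attached
 * directly below the root hub it (a) drops core_clk to the HS-only rate when
 * no SuperSpeed root-hub port is connected and restores the default rate on
 * removal, and (b) reports the configuration's bMaxPower to the "usb" power
 * supply (POWER_SUPPLY_PROP_BOOST_CURRENT) so the PMIC can optimize its boost.
 */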
Jack Pham4d4e9342016-12-07 19:25:02 -08003168static int dwc3_msm_host_notifier(struct notifier_block *nb,
3169 unsigned long event, void *ptr)
3170{
3171 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
3172 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3173 struct usb_device *udev = ptr;
3174 union power_supply_propval pval;
3175 unsigned int max_power;
3176
3177 if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
3178 return NOTIFY_DONE;
3179
3180 if (!mdwc->usb_psy) {
3181 mdwc->usb_psy = power_supply_get_by_name("usb");
3182 if (!mdwc->usb_psy)
3183 return NOTIFY_DONE;
3184 }
3185
3186 /*
3187 * For direct-attach devices, new udev is direct child of root hub
3188 * i.e. dwc -> xhci -> root_hub -> udev
3189 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
3190 */
3191 if (udev->parent && !udev->parent->parent &&
3192 udev->dev.parent->parent == &dwc->xhci->dev) {
3193 if (event == USB_DEVICE_ADD && udev->actconfig) {
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003194 if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
3195 /*
3196 * Core clock rate can be reduced only if root
3197 * hub SS port is not enabled/connected.
3198 */
3199 clk_set_rate(mdwc->core_clk,
3200 mdwc->core_clk_rate_hs);
3201 dev_dbg(mdwc->dev,
3202 "set hs core clk rate %ld\n",
3203 mdwc->core_clk_rate_hs);
3204 mdwc->max_rh_port_speed = USB_SPEED_HIGH;
3205 } else {
3206 mdwc->max_rh_port_speed = USB_SPEED_SUPER;
3207 }
3208
Jack Pham4d4e9342016-12-07 19:25:02 -08003209 if (udev->speed >= USB_SPEED_SUPER)
3210 max_power = udev->actconfig->desc.bMaxPower * 8;
3211 else
3212 max_power = udev->actconfig->desc.bMaxPower * 2;
3213 dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
3214 dev_name(&udev->dev), max_power);
3215
3216 /* inform PMIC of max power so it can optimize boost */
3217 pval.intval = max_power * 1000;
3218 power_supply_set_property(mdwc->usb_psy,
3219 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
3220 } else {
3221 pval.intval = 0;
3222 power_supply_set_property(mdwc->usb_psy,
3223 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
Hemant Kumar6f504dc2017-02-07 14:13:58 -08003224
3225 /* set rate back to default core clk rate */
3226 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
3227 dev_dbg(mdwc->dev, "set core clk rate %ld\n",
3228 mdwc->core_clk_rate);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003229 mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
Jack Pham4d4e9342016-12-07 19:25:02 -08003230 }
3231 }
3232
3233 return NOTIFY_DONE;
3234}
3235
Mayank Rana511f3b22016-08-02 12:00:11 -07003236#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3237
3238/**
3239 * dwc3_otg_start_host - helper function for starting/stoping the host
3240 * dwc3_otg_start_host - helper function for starting/stopping the host
3241 *
3242 * @mdwc: Pointer to the dwc3_msm structure.
3243 * @on: start / stop the host controller driver.
3244 *
3245 * Returns 0 on success otherwise negative errno.
3246 */
3247static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
3248{
3249 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3250 int ret = 0;
3251
3252 if (!dwc->xhci)
3253 return -EINVAL;
3254
3255 /*
3256 * The vbus_reg pointer could have multiple values
3257 * NULL: regulator_get() hasn't been called, or was previously deferred
3258 * IS_ERR: regulator could not be obtained, so skip using it
3259 * Valid pointer otherwise
3260 */
3261 if (!mdwc->vbus_reg) {
3262 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
3263 "vbus_dwc3");
3264 if (IS_ERR(mdwc->vbus_reg) &&
3265 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
3266 /* regulators may not be ready, so retry again later */
3267 mdwc->vbus_reg = NULL;
3268 return -EPROBE_DEFER;
3269 }
3270 }
3271
3272 if (on) {
3273 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3274
Mayank Rana511f3b22016-08-02 12:00:11 -07003275 mdwc->hs_phy->flags |= PHY_HOST_MODE;
Hemant Kumarde1df692016-04-26 19:36:48 -07003276 if (dwc->maximum_speed == USB_SPEED_SUPER)
3277 mdwc->ss_phy->flags |= PHY_HOST_MODE;
3278
3279 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003280 dbg_event(0xFF, "StrtHost gync",
3281 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003282 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3283 if (!IS_ERR(mdwc->vbus_reg))
3284 ret = regulator_enable(mdwc->vbus_reg);
3285 if (ret) {
3286 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3287 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3288 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3289 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003290 dbg_event(0xFF, "vregerr psync",
3291 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003292 return ret;
3293 }
3294
3295 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3296
Jack Pham4d4e9342016-12-07 19:25:02 -08003297 mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
3298 usb_register_notify(&mdwc->host_nb);
3299
Mayank Rana511f3b22016-08-02 12:00:11 -07003300 /*
3301 * FIXME If micro A cable is disconnected during system suspend,
3302 * xhci platform device will be removed before runtime pm is
3303 * enabled for xhci device. Due to this, disable_depth becomes
3304	 * greater than one and runtime PM is not enabled for the next micro-A
3305 * connect. Fix this by calling pm_runtime_init for xhci device.
3306 */
3307 pm_runtime_init(&dwc->xhci->dev);
3308 ret = platform_device_add(dwc->xhci);
3309 if (ret) {
3310 dev_err(mdwc->dev,
3311 "%s: failed to add XHCI pdev ret=%d\n",
3312 __func__, ret);
3313 if (!IS_ERR(mdwc->vbus_reg))
3314 regulator_disable(mdwc->vbus_reg);
3315 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3316 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3317 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003318 dbg_event(0xFF, "pdeverr psync",
3319 atomic_read(&mdwc->dev->power.usage_count));
Jack Pham4d4e9342016-12-07 19:25:02 -08003320 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003321 return ret;
3322 }
3323
3324 /*
3325 * In some cases the USB PHY is observed not to enter suspend as part
3326 * of host mode suspend. Hence disable the xHCI device's runtime PM
3327 * here if disable_host_mode_pm is set.
3328 */
3329 if (mdwc->disable_host_mode_pm)
3330 pm_runtime_disable(&dwc->xhci->dev);
3331
3332 mdwc->in_host_mode = true;
3333 dwc3_usb3_phy_suspend(dwc, true);
3334
3335 /* xHCI should have incremented child count as necessary */
Mayank Rana08e41922017-03-02 15:25:48 -08003336 dbg_event(0xFF, "StrtHost psync",
3337 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003338 pm_runtime_mark_last_busy(mdwc->dev);
3339 pm_runtime_put_sync_autosuspend(mdwc->dev);
3340 } else {
3341 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3342
3343 if (!IS_ERR(mdwc->vbus_reg))
3344 ret = regulator_disable(mdwc->vbus_reg);
3345 if (ret) {
3346 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3347 return ret;
3348 }
3349
3350 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003351 dbg_event(0xFF, "StopHost gsync",
3352 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003353 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3354 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3355 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3356 platform_device_del(dwc->xhci);
Jack Pham4d4e9342016-12-07 19:25:02 -08003357 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003358
3359 /*
3360 * Perform USB hardware RESET (both core reset and DBM reset)
3361 * when moving from host to peripheral. This is required for
3362 * peripheral mode to work.
3363 */
3364 dwc3_msm_block_reset(mdwc, true);
3365
3366 dwc3_usb3_phy_suspend(dwc, false);
3367 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3368
3369 mdwc->in_host_mode = false;
3370
3371 /* re-init core and OTG registers as block reset clears these */
3372 dwc3_post_host_reset_core_init(dwc);
3373 pm_runtime_mark_last_busy(mdwc->dev);
3374 pm_runtime_put_sync_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003375 dbg_event(0xFF, "StopHost psync",
3376 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003377 }
3378
3379 return 0;
3380}
3381
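/**
 * dwc3_override_vbus_status - propagate VBUS state to the controller.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @vbus_present: true if VBUS is present, false otherwise.
 *
 * Mirrors the VBUS/session-valid status from the HS PHY (and, on SuperSpeed
 * capable targets, the lane power-present bit from the SS PHY) into the
 * QSCRATCH control registers so the core sees the correct session state.
 */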
3382static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3383{
3384 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3385
3386 /* Update OTG VBUS Valid from HSPHY to controller */
3387 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3388 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3389 UTMI_OTG_VBUS_VALID,
3390 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3391
3392 /* Update only if Super Speed is supported */
3393 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3394 /* Update VBUS Valid from SSPHY to controller */
3395 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3396 LANE0_PWR_PRESENT,
3397 vbus_present ? LANE0_PWR_PRESENT : 0);
3398 }
3399}
3400
3401/**
3402 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3403 *
3404 * @mdwc: Pointer to the dwc3_msm structure.
3405 * @on: Turn ON/OFF the gadget.
3406 *
3407 * Returns 0 on success, otherwise negative errno.
3408 */
3409static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3410{
3411 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3412
3413 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003414 dbg_event(0xFF, "StrtGdgt gsync",
3415 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003416
3417 if (on) {
3418 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3419 __func__, dwc->gadget.name);
3420
3421 dwc3_override_vbus_status(mdwc, true);
3422 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3423 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3424
3425 /*
3426 * Core reset is not required during start peripheral. Only
3427 * DBM reset is required, hence perform only DBM reset here.
3428 */
3429 dwc3_msm_block_reset(mdwc, false);
3430
3431 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3432 usb_gadget_vbus_connect(&dwc->gadget);
3433 } else {
3434 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3435 __func__, dwc->gadget.name);
3436 usb_gadget_vbus_disconnect(&dwc->gadget);
3437 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3438 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3439 dwc3_override_vbus_status(mdwc, false);
3440 dwc3_usb3_phy_suspend(dwc, false);
3441 }
3442
3443 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003444 dbg_event(0xFF, "StopGdgt psync",
3445 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003446
3447 return 0;
3448}
3449
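/**
 * dwc3_msm_gadget_vbus_draw - request input current from the USB charger.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @mA: Current limit negotiated on the bus, in mA.
 *
 * Converts the negotiated value to uA and forwards it to the "usb"
 * power_supply via POWER_SUPPLY_PROP_CURRENT_MAX. Does nothing if charging
 * is disabled or the requested limit equals the one already set.
 */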
3450static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3451{
Jack Pham8caff352016-08-19 16:33:55 -07003452 union power_supply_propval pval = {0};
Jack Phamd72bafe2016-08-09 11:07:22 -07003453 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07003454
3455 if (mdwc->charging_disabled)
3456 return 0;
3457
3458 if (mdwc->max_power == mA)
3459 return 0;
3460
3461 if (!mdwc->usb_psy) {
3462 mdwc->usb_psy = power_supply_get_by_name("usb");
3463 if (!mdwc->usb_psy) {
3464 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3465 return -ENODEV;
3466 }
3467 }
3468
Jack Pham8caff352016-08-19 16:33:55 -07003469 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
3470 if (pval.intval != POWER_SUPPLY_TYPE_USB)
3471 return 0;
3472
Mayank Rana511f3b22016-08-02 12:00:11 -07003473 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3474
Mayank Rana511f3b22016-08-02 12:00:11 -07003475 /* Set max current limit in uA */
Jack Pham8caff352016-08-19 16:33:55 -07003476 pval.intval = 1000 * mA;
Jack Phamd72bafe2016-08-09 11:07:22 -07003477 ret = power_supply_set_property(mdwc->usb_psy,
3478 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3479 if (ret) {
3480 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3481 return ret;
3482 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003483
3484 mdwc->max_power = mA;
3485 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003486}
3487
3488
3489/**
3490 * dwc3_otg_sm_work - OTG state machine work function.
3491 *
3492 * @w: Pointer to the sm_work member embedded in struct dwc3_msm.
3493 *
3494 * NOTE: After any change in otg_state, we must reschedule the state machine.
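 *
 * Simplified view of the transitions handled below (derived from the
 * switch statement; not an exhaustive OTG diagram):
 *   UNDEFINED    -> B_IDLE        once a cable (ID low or VBUS) is seen
 *   B_IDLE       -> B_PERIPHERAL  on B_SESS_VLD (VBUS present)
 *   B_IDLE       -> A_IDLE        on !ID (micro-A cable attached)
 *   B_PERIPHERAL -> B_SUSPEND     on bus suspend with session still valid
 *   B_SUSPEND    -> B_PERIPHERAL  on host initiated resume
 *   B_SUSPEND    -> B_IDLE        on VBUS removal
 *   A_IDLE       -> A_HOST        once the VBUS regulator is available
 *   A_HOST       -> B_IDLE        on ID (micro-A cable removed)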
3495 */
3496static void dwc3_otg_sm_work(struct work_struct *w)
3497{
3498 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3499 struct dwc3 *dwc = NULL;
3500 bool work = false;
3501 int ret = 0;
3502 unsigned long delay = 0;
3503 const char *state;
3504
3505 if (mdwc->dwc3)
3506 dwc = platform_get_drvdata(mdwc->dwc3);
3507
3508 if (!dwc) {
3509 dev_err(mdwc->dev, "dwc is NULL.\n");
3510 return;
3511 }
3512
3513 state = usb_otg_state_string(mdwc->otg_state);
3514 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana08e41922017-03-02 15:25:48 -08003515 dbg_event(0xFF, state, 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003516
3517 /* Check OTG state */
3518 switch (mdwc->otg_state) {
3519 case OTG_STATE_UNDEFINED:
3520 /* Do nothing if no cable connected */
3521 if (test_bit(ID, &mdwc->inputs) &&
3522 !test_bit(B_SESS_VLD, &mdwc->inputs))
3523 break;
3524
Mayank Rana08e41922017-03-02 15:25:48 -08003525 dbg_event(0xFF, "Exit UNDEF", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003526 mdwc->otg_state = OTG_STATE_B_IDLE;
3527 /* fall-through */
3528 case OTG_STATE_B_IDLE:
3529 if (!test_bit(ID, &mdwc->inputs)) {
3530 dev_dbg(mdwc->dev, "!id\n");
3531 mdwc->otg_state = OTG_STATE_A_IDLE;
3532 work = 1;
3533 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
3534 dev_dbg(mdwc->dev, "b_sess_vld\n");
3535 /*
3536 * Increment pm usage count upon cable connect. Count
3537 * is decremented in OTG_STATE_B_PERIPHERAL state on
3538 * cable disconnect or in bus suspend.
3539 */
3540 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003541 dbg_event(0xFF, "BIDLE gsync",
3542 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003543 dwc3_otg_start_peripheral(mdwc, 1);
3544 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3545 work = 1;
3546 } else {
3547 dwc3_msm_gadget_vbus_draw(mdwc, 0);
3548 dev_dbg(mdwc->dev, "Cable disconnected\n");
3549 }
3550 break;
3551
3552 case OTG_STATE_B_PERIPHERAL:
3553 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
3554 !test_bit(ID, &mdwc->inputs)) {
3555 dev_dbg(mdwc->dev, "!id || !bsv\n");
3556 mdwc->otg_state = OTG_STATE_B_IDLE;
3557 dwc3_otg_start_peripheral(mdwc, 0);
3558 /*
3559 * Decrement pm usage count upon cable disconnect
3560 * which was incremented upon cable connect in
3561 * OTG_STATE_B_IDLE state
3562 */
3563 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003564 dbg_event(0xFF, "!BSV psync",
3565 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003566 work = 1;
3567 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
3568 test_bit(B_SESS_VLD, &mdwc->inputs)) {
3569 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
3570 mdwc->otg_state = OTG_STATE_B_SUSPEND;
3571 /*
3572 * Decrement pm usage count upon bus suspend.
3573 * Count was incremented either upon cable
3574 * connect in OTG_STATE_B_IDLE or host
3575 * initiated resume after bus suspend in
3576 * OTG_STATE_B_SUSPEND state
3577 */
3578 pm_runtime_mark_last_busy(mdwc->dev);
3579 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003580 dbg_event(0xFF, "SUSP put",
3581 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003582 }
3583 break;
3584
3585 case OTG_STATE_B_SUSPEND:
3586 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
3587 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
3588 mdwc->otg_state = OTG_STATE_B_IDLE;
3589 dwc3_otg_start_peripheral(mdwc, 0);
3590 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
3591 dev_dbg(mdwc->dev, "BSUSP !susp\n");
3592 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3593 /*
3594 * Increment pm usage count upon host
3595 * initiated resume. Count was decremented
3596 * upon bus suspend in
3597 * OTG_STATE_B_PERIPHERAL state.
3598 */
3599 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003600 dbg_event(0xFF, "!SUSP gsync",
3601 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003602 }
3603 break;
3604
3605 case OTG_STATE_A_IDLE:
3606 /* Switch to A-Device */
3607 if (test_bit(ID, &mdwc->inputs)) {
3608 dev_dbg(mdwc->dev, "id\n");
3609 mdwc->otg_state = OTG_STATE_B_IDLE;
3610 mdwc->vbus_retry_count = 0;
3611 work = 1;
3612 } else {
3613 mdwc->otg_state = OTG_STATE_A_HOST;
3614 ret = dwc3_otg_start_host(mdwc, 1);
3615 if ((ret == -EPROBE_DEFER) &&
3616 mdwc->vbus_retry_count < 3) {
3617 /*
3618 * Getting the regulator failed because the regulator driver
3619 * is not up yet. Retry starting the host after 1 sec.
3620 */
3621 mdwc->otg_state = OTG_STATE_A_IDLE;
3622 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
3623 delay = VBUS_REG_CHECK_DELAY;
3624 work = 1;
3625 mdwc->vbus_retry_count++;
3626 } else if (ret) {
3627 dev_err(mdwc->dev, "unable to start host\n");
3628 mdwc->otg_state = OTG_STATE_A_IDLE;
3629 goto ret;
3630 }
3631 }
3632 break;
3633
3634 case OTG_STATE_A_HOST:
3635 if (test_bit(ID, &mdwc->inputs)) {
3636 dev_dbg(mdwc->dev, "id\n");
3637 dwc3_otg_start_host(mdwc, 0);
3638 mdwc->otg_state = OTG_STATE_B_IDLE;
3639 mdwc->vbus_retry_count = 0;
3640 work = 1;
3641 } else {
3642 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003643 dbg_event(0xFF, "XHCIResume", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003644 if (dwc)
3645 pm_runtime_resume(&dwc->xhci->dev);
3646 }
3647 break;
3648
3649 default:
3650 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
3651
3652 }
3653
3654 if (work)
3655 schedule_delayed_work(&mdwc->sm_work, delay);
3656
3657ret:
3658 return;
3659}
3660
3661#ifdef CONFIG_PM_SLEEP
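/*
 * System sleep callbacks. PM suspend is only allowed once the controller is
 * already runtime suspended (in LPM); otherwise it is aborted with -EBUSY so
 * pending USB activity can finish first. Resume clears pm_suspended and
 * queues resume_work, which kicks the OTG state machine.
 */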
3662static int dwc3_msm_pm_suspend(struct device *dev)
3663{
3664 int ret = 0;
3665 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3666 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3667
3668 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003669 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003670
3671 flush_workqueue(mdwc->dwc3_wq);
3672 if (!atomic_read(&dwc->in_lpm)) {
3673 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
3674 return -EBUSY;
3675 }
3676
3677 ret = dwc3_msm_suspend(mdwc);
3678 if (!ret)
3679 atomic_set(&mdwc->pm_suspended, 1);
3680
3681 return ret;
3682}
3683
3684static int dwc3_msm_pm_resume(struct device *dev)
3685{
3686 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003687 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003688
3689 dev_dbg(dev, "dwc3-msm PM resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003690 dbg_event(0xFF, "PM Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003691
Mayank Rana511f3b22016-08-02 12:00:11 -07003692 /* flush to avoid race in read/write of pm_suspended */
3693 flush_workqueue(mdwc->dwc3_wq);
3694 atomic_set(&mdwc->pm_suspended, 0);
3695
3696 /* kick in otg state machine */
3697 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
3698
3699 return 0;
3700}
3701#endif
3702
3703#ifdef CONFIG_PM
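/*
 * Runtime PM callbacks. runtime_idle returns 0 so the PM core may proceed
 * with runtime suspend; runtime_suspend/resume delegate to dwc3_msm_suspend()
 * and dwc3_msm_resume(), which perform the actual low power mode entry/exit.
 */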
3704static int dwc3_msm_runtime_idle(struct device *dev)
3705{
Mayank Rana08e41922017-03-02 15:25:48 -08003706 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3707 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3708
Mayank Rana511f3b22016-08-02 12:00:11 -07003709 dev_dbg(dev, "DWC3-msm runtime idle\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003710 dbg_event(0xFF, "RT Idle", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003711
3712 return 0;
3713}
3714
3715static int dwc3_msm_runtime_suspend(struct device *dev)
3716{
3717 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003718 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003719
3720 dev_dbg(dev, "DWC3-msm runtime suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003721 dbg_event(0xFF, "RT Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003722
3723 return dwc3_msm_suspend(mdwc);
3724}
3725
3726static int dwc3_msm_runtime_resume(struct device *dev)
3727{
3728 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003729 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003730
3731 dev_dbg(dev, "DWC3-msm runtime resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003732 dbg_event(0xFF, "RT Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003733
3734 return dwc3_msm_resume(mdwc);
3735}
3736#endif
3737
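/*
 * SET_SYSTEM_SLEEP_PM_OPS and SET_RUNTIME_PM_OPS expand to nothing when
 * CONFIG_PM_SLEEP / CONFIG_PM are disabled, matching the #ifdef guards
 * around the callbacks above.
 */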
3738static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
3739 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
3740 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
3741 dwc3_msm_runtime_idle)
3742};
3743
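/*
 * Binding illustration (assumption, not taken from the driver source): a
 * minimal devicetree node that this match table would bind against. The node
 * name and unit address are made up for the example; the required properties
 * are defined by the platform dtsi files and binding documentation.
 *
 *	usb@a600000 {
 *		compatible = "qcom,dwc-usb3-msm";
 *		...
 *	};
 */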
3744static const struct of_device_id of_dwc3_match[] = {
3745 {
3746 .compatible = "qcom,dwc-usb3-msm",
3747 },
3748 { },
3749};
3750MODULE_DEVICE_TABLE(of, of_dwc3_match);
3751
3752static struct platform_driver dwc3_msm_driver = {
3753 .probe = dwc3_msm_probe,
3754 .remove = dwc3_msm_remove,
3755 .driver = {
3756 .name = "msm-dwc3",
3757 .pm = &dwc3_msm_dev_pm_ops,
3758 .of_match_table = of_dwc3_match,
3759 },
3760};
3761
3762MODULE_LICENSE("GPL v2");
3763MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
3764
3765static int __init dwc3_msm_init(void)
3766{
3767 return platform_driver_register(&dwc3_msm_driver);
3768}
3769module_init(dwc3_msm_init);
3770
3771static void __exit dwc3_msm_exit(void)
3772{
3773 platform_driver_unregister(&dwc3_msm_driver);
3774}
3775module_exit(dwc3_msm_exit);