blob: f56bed65664f891d159069a71f601418cff2f9e7 [file] [log] [blame]
Mayank Rana511f3b22016-08-02 12:00:11 -07001/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/delay.h>
30#include <linux/of.h>
31#include <linux/of_platform.h>
32#include <linux/of_gpio.h>
33#include <linux/list.h>
34#include <linux/uaccess.h>
35#include <linux/usb/ch9.h>
36#include <linux/usb/gadget.h>
37#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070038#include <linux/regulator/consumer.h>
39#include <linux/pm_wakeup.h>
40#include <linux/power_supply.h>
41#include <linux/cdev.h>
42#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070043#include <linux/msm-bus.h>
44#include <linux/irq.h>
45#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053046#include <linux/reset.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070047
48#include "power.h"
49#include "core.h"
50#include "gadget.h"
51#include "dbm.h"
52#include "debug.h"
53#include "xhci.h"
54
55/* time out to wait for USB cable status notification (in ms)*/
56#define SM_INIT_TIMEOUT 30000
57
58/* AHB2PHY register offsets */
59#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
60
/* AHB2PHY read/write wait value */
62#define ONE_READ_WRITE_WAIT 0x11
63
/* CPU to which the USB interrupt is pinned (writable module parameter) */
static int cpu_to_affin;
module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
68
69/* XHCI registers */
70#define USB3_HCSPARAMS1 (0x4)
71#define USB3_PORTSC (0x420)
72
73/**
74 * USB QSCRATCH Hardware registers
75 *
76 */
77#define QSCRATCH_REG_OFFSET (0x000F8800)
78#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
79#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
80#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
81#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
82
83#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
84#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
85#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
86#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
87#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
88
89/* QSCRATCH_GENERAL_CFG register bit offset */
90#define PIPE_UTMI_CLK_SEL BIT(0)
91#define PIPE3_PHYSTATUS_SW BIT(3)
92#define PIPE_UTMI_CLK_DIS BIT(8)
93
94#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
95#define UTMI_OTG_VBUS_VALID BIT(20)
96#define SW_SESSVLD_SEL BIT(28)
97
98#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
99#define LANE0_PWR_PRESENT BIT(24)
100
101/* GSI related registers */
102#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
103#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
104
105#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
106#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
107#define GSI_CLK_EN_MASK BIT(12)
108#define BLOCK_GSI_WR_GO_MASK BIT(1)
109#define GSI_EN_MASK BIT(0)
110
111#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
112#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
113#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
114#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
115
116#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
117#define GSI_WR_CTRL_STATE_MASK BIT(15)
118
/*
 * Bookkeeping node that saves a usb_request's original completion callback
 * while the request is queued on a DBM endpoint; linked into
 * dwc3_msm::req_complete_list and freed when the request completes.
 */
struct dwc3_msm_req_complete {
	struct list_head list_item;
	struct usb_request *req;
	void (*orig_complete)(struct usb_ep *ep,
			      struct usb_request *req);
};
125
/* State of the USB ID pin: grounded or floating */
enum dwc3_id_state {
	DWC3_ID_GROUND = 0,
	DWC3_ID_FLOAT,
};
130
/* Type-C cable plug orientation (which CC line is active) */
enum plug_orientation {
	ORIENTATION_NONE,
	ORIENTATION_CC1,
	ORIENTATION_CC2,
};
137
138/* Input bits to state machine (mdwc->inputs) */
139
140#define ID 0
141#define B_SESS_VLD 1
142#define B_SUSPEND 2
143
/*
 * Per-device state of the MSM glue layer wrapped around the DWC3 core.
 * One instance per controller, stored as drvdata of the glue platform
 * device (the core's parent).
 */
struct dwc3_msm {
	struct device *dev;
	void __iomem *base;		/* controller register space */
	void __iomem *ahb2phy_base;	/* AHB2PHY bridge registers */
	struct platform_device *dwc3;	/* child DWC3 core device */
	/* original EP ops, saved before they are overridden for DBM mode */
	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
	/* list of dwc3_msm_req_complete bookkeeping nodes */
	struct list_head req_complete_list;
	/* clocks and resets */
	struct clk *xo_clk;
	struct clk *core_clk;
	long core_clk_rate;
	struct clk *iface_clk;
	struct clk *sleep_clk;
	struct clk *utmi_clk;
	unsigned int utmi_clk_rate;
	struct clk *utmi_clk_src;
	struct clk *bus_aggr_clk;
	struct clk *cfg_ahb_clk;
	struct reset_control *core_reset;
	struct regulator *dwc3_gdsc;

	struct usb_phy *hs_phy, *ss_phy;

	struct dbm *dbm;

	/* VBUS regulator for host mode */
	struct regulator *vbus_reg;
	int vbus_retry_count;
	bool resume_pending;
	atomic_t pm_suspended;
	int hs_phy_irq;
	int ss_phy_irq;
	struct work_struct resume_work;
	struct work_struct restart_usb_work;
	bool in_restart;
	struct workqueue_struct *dwc3_wq;
	struct delayed_work sm_work;	/* state machine work */
	unsigned long inputs;		/* state machine input bits (ID, ...) */
	unsigned int max_power;
	bool charging_disabled;
	enum usb_otg_state otg_state;
	struct work_struct bus_vote_w;
	unsigned int bus_vote;
	u32 bus_perf_client;
	struct msm_bus_scale_pdata *bus_scale_table;
	struct power_supply *usb_psy;
	struct work_struct vbus_draw_work;
	bool in_host_mode;
	unsigned int tx_fifo_size;
	bool vbus_active;
	bool suspend;
	bool disable_host_mode_pm;
	enum dwc3_id_state id_state;
	unsigned long lpm_flags;	/* low power mode status, bits below */
#define MDWC3_SS_PHY_SUSPEND		BIT(0)
#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY	BIT(1)
#define MDWC3_POWER_COLLAPSE		BIT(2)

	unsigned int irq_to_affin;
	struct notifier_block dwc3_cpu_notifier;

	/* external connector (VBUS / ID) notifications */
	struct extcon_dev *extcon_vbus;
	struct extcon_dev *extcon_id;
	struct notifier_block vbus_nb;
	struct notifier_block id_nb;

	int pwr_event_irq;
	atomic_t in_p3;			/* SS PHY currently in P3 state */
	unsigned int lpm_to_suspend_delay;
	bool init;
	enum plug_orientation typec_orientation;
};
215
216#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
217#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
218#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
219
220#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
221#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
222#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
223
224#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
225#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
226#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
227
228#define DSTS_CONNECTSPD_SS 0x4
229
230
231static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
232static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
233
234/**
235 *
236 * Read register with debug info.
237 *
238 * @base - DWC3 base virtual address.
239 * @offset - register offset.
240 *
241 * @return u32
242 */
243static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
244{
245 u32 val = ioread32(base + offset);
246 return val;
247}
248
249/**
250 * Read register masked field with debug info.
251 *
252 * @base - DWC3 base virtual address.
253 * @offset - register offset.
254 * @mask - register bitmask.
255 *
256 * @return u32
257 */
258static inline u32 dwc3_msm_read_reg_field(void *base,
259 u32 offset,
260 const u32 mask)
261{
262 u32 shift = find_first_bit((void *)&mask, 32);
263 u32 val = ioread32(base + offset);
264
265 val &= mask; /* clear other bits */
266 val >>= shift;
267 return val;
268}
269
270/**
271 *
272 * Write register with debug info.
273 *
274 * @base - DWC3 base virtual address.
275 * @offset - register offset.
276 * @val - value to write.
277 *
278 */
279static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
280{
281 iowrite32(val, base + offset);
282}
283
284/**
285 * Write register masked field with debug info.
286 *
287 * @base - DWC3 base virtual address.
288 * @offset - register offset.
289 * @mask - register bitmask.
290 * @val - value to write.
291 *
292 */
293static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
294 const u32 mask, u32 val)
295{
296 u32 shift = find_first_bit((void *)&mask, 32);
297 u32 tmp = ioread32(base + offset);
298
299 tmp &= ~mask; /* clear written bits */
300 val = tmp | (val << shift);
301 iowrite32(val, base + offset);
302}
303
304/**
305 * Write register and read back masked value to confirm it is written
306 *
307 * @base - DWC3 base virtual address.
308 * @offset - register offset.
309 * @mask - register bitmask specifying what should be updated
310 * @val - value to write.
311 *
312 */
313static inline void dwc3_msm_write_readback(void *base, u32 offset,
314 const u32 mask, u32 val)
315{
316 u32 write_val, tmp = ioread32(base + offset);
317
318 tmp &= ~mask; /* retain other bits */
319 write_val = tmp | val;
320
321 iowrite32(write_val, base + offset);
322
323 /* Read back to see if val was written */
324 tmp = ioread32(base + offset);
325 tmp &= mask; /* clear other bits */
326
327 if (tmp != val)
328 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
329 __func__, val, offset);
330}
331
332static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
333{
334 int i, num_ports;
335 u32 reg;
336
337 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
338 num_ports = HCS_MAX_PORTS(reg);
339
340 for (i = 0; i < num_ports; i++) {
341 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
342 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
343 return true;
344 }
345
346 return false;
347}
348
349static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
350{
351 u8 speed;
352
353 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
354 return !!(speed & DSTS_CONNECTSPD_SS);
355}
356
357static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
358{
359 if (mdwc->in_host_mode)
360 return dwc3_msm_is_host_superspeed(mdwc);
361
362 return dwc3_msm_is_dev_superspeed(mdwc);
363}
364
365#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
366/**
367 * Configure the DBM with the BAM's data fifo.
368 * This function is called by the USB BAM Driver
369 * upon initialization.
370 *
371 * @ep - pointer to usb endpoint.
372 * @addr - address of data fifo.
373 * @size - size of data fifo.
374 *
375 */
376int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
377 u32 size, u8 dst_pipe_idx)
378{
379 struct dwc3_ep *dep = to_dwc3_ep(ep);
380 struct dwc3 *dwc = dep->dwc;
381 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
382
383 dev_dbg(mdwc->dev, "%s\n", __func__);
384
385 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
386 dst_pipe_idx);
387}
388
389
390/**
391* Cleanups for msm endpoint on request complete.
392*
393* Also call original request complete.
394*
395* @usb_ep - pointer to usb_ep instance.
396* @request - pointer to usb_request instance.
397*
398* @return int - 0 on success, negative on error.
399*/
400static void dwc3_msm_req_complete_func(struct usb_ep *ep,
401 struct usb_request *request)
402{
403 struct dwc3_ep *dep = to_dwc3_ep(ep);
404 struct dwc3 *dwc = dep->dwc;
405 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
406 struct dwc3_msm_req_complete *req_complete = NULL;
407
408 /* Find original request complete function and remove it from list */
409 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
410 if (req_complete->req == request)
411 break;
412 }
413 if (!req_complete || req_complete->req != request) {
414 dev_err(dep->dwc->dev, "%s: could not find the request\n",
415 __func__);
416 return;
417 }
418 list_del(&req_complete->list_item);
419
420 /*
421 * Release another one TRB to the pool since DBM queue took 2 TRBs
422 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
423 * released only one.
424 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700425 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700426
427 /* Unconfigure dbm ep */
428 dbm_ep_unconfig(mdwc->dbm, dep->number);
429
430 /*
431 * If this is the last endpoint we unconfigured, than reset also
432 * the event buffers; unless unconfiguring the ep due to lpm,
433 * in which case the event buffer only gets reset during the
434 * block reset.
435 */
436 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
437 !dbm_reset_ep_after_lpm(mdwc->dbm))
438 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
439
440 /*
441 * Call original complete function, notice that dwc->lock is already
442 * taken by the caller of this function (dwc3_gadget_giveback()).
443 */
444 request->complete = req_complete->orig_complete;
445 if (request->complete)
446 request->complete(ep, request);
447
448 kfree(req_complete);
449}
450
451
452/**
453* Helper function
454*
455* Reset DBM endpoint.
456*
457* @mdwc - pointer to dwc3_msm instance.
458* @dep - pointer to dwc3_ep instance.
459*
460* @return int - 0 on success, negative on error.
461*/
462static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
463{
464 int ret;
465
466 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
467
468 /* Reset the dbm endpoint */
469 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
470 if (ret) {
471 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
472 __func__);
473 return ret;
474 }
475
476 /*
477 * The necessary delay between asserting and deasserting the dbm ep
478 * reset is based on the number of active endpoints. If there is more
479 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
480 * delay will suffice.
481 */
482 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
483 usleep_range(1000, 1200);
484 else
485 udelay(10);
486 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
487 if (ret) {
488 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
489 __func__);
490 return ret;
491 }
492
493 return 0;
494}
495
496/**
497* Reset the DBM endpoint which is linked to the given USB endpoint.
498*
499* @usb_ep - pointer to usb_ep instance.
500*
501* @return int - 0 on success, negative on error.
502*/
503
504int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
505{
506 struct dwc3_ep *dep = to_dwc3_ep(ep);
507 struct dwc3 *dwc = dep->dwc;
508 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
509
510 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
511}
512EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
513
514
515/**
516* Helper function.
517* See the header of the dwc3_msm_ep_queue function.
518*
519* @dwc3_ep - pointer to dwc3_ep instance.
520* @req - pointer to dwc3_request instance.
521*
522* @return int - 0 on success, negative on error.
523*/
524static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
525{
526 struct dwc3_trb *trb;
527 struct dwc3_trb *trb_link;
528 struct dwc3_gadget_ep_cmd_params params;
529 u32 cmd;
530 int ret = 0;
531
Mayank Rana83ad5822016-08-09 14:17:22 -0700532 /* We push the request to the dep->started_list list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700533 * this request is issued with start transfer. The request will be out
534 * from this list in 2 cases. The first is that the transfer will be
535 * completed (not if the transfer is endless using a circular TRBs with
536 * with link TRB). The second case is an option to do stop stransfer,
537 * this can be initiated by the function driver when calling dequeue.
538 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700539 req->started = true;
540 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700541
542 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana83ad5822016-08-09 14:17:22 -0700543 trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
544 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700545 memset(trb, 0, sizeof(*trb));
546
547 req->trb = trb;
548 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
549 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
550 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
551 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
552 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
553
554 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana83ad5822016-08-09 14:17:22 -0700555 trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
556 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700557 memset(trb_link, 0, sizeof(*trb_link));
558
559 trb_link->bpl = lower_32_bits(req->trb_dma);
560 trb_link->bph = DBM_TRB_BIT |
561 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
562 trb_link->size = 0;
563 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
564
565 /*
566 * Now start the transfer
567 */
568 memset(&params, 0, sizeof(params));
569 params.param0 = 0; /* TDAddr High */
570 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
571
572 /* DBM requires IOC to be set */
573 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700574 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700575 if (ret < 0) {
576 dev_dbg(dep->dwc->dev,
577 "%s: failed to send STARTTRANSFER command\n",
578 __func__);
579
580 list_del(&req->list);
581 return ret;
582 }
583 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700584 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700585
586 return ret;
587}
588
589/**
590* Queue a usb request to the DBM endpoint.
591* This function should be called after the endpoint
592* was enabled by the ep_enable.
593*
594* This function prepares special structure of TRBs which
595* is familiar with the DBM HW, so it will possible to use
596* this endpoint in DBM mode.
597*
598* The TRBs prepared by this function, is one normal TRB
599* which point to a fake buffer, followed by a link TRB
600* that points to the first TRB.
601*
602* The API of this function follow the regular API of
603* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
604*
605* @usb_ep - pointer to usb_ep instance.
606* @request - pointer to usb_request instance.
607* @gfp_flags - possible flags.
608*
609* @return int - 0 on success, negative on error.
610*/
611static int dwc3_msm_ep_queue(struct usb_ep *ep,
612 struct usb_request *request, gfp_t gfp_flags)
613{
614 struct dwc3_request *req = to_dwc3_request(request);
615 struct dwc3_ep *dep = to_dwc3_ep(ep);
616 struct dwc3 *dwc = dep->dwc;
617 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
618 struct dwc3_msm_req_complete *req_complete;
619 unsigned long flags;
620 int ret = 0, size;
621 u8 bam_pipe;
622 bool producer;
623 bool disable_wb;
624 bool internal_mem;
625 bool ioc;
626 bool superspeed;
627
628 if (!(request->udc_priv & MSM_SPS_MODE)) {
629 /* Not SPS mode, call original queue */
630 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
631 __func__);
632
633 return (mdwc->original_ep_ops[dep->number])->queue(ep,
634 request,
635 gfp_flags);
636 }
637
638 /* HW restriction regarding TRB size (8KB) */
639 if (req->request.length < 0x2000) {
640 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
641 return -EINVAL;
642 }
643
644 /*
645 * Override req->complete function, but before doing that,
646 * store it's original pointer in the req_complete_list.
647 */
648 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
649 if (!req_complete)
650 return -ENOMEM;
651
652 req_complete->req = request;
653 req_complete->orig_complete = request->complete;
654 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
655 request->complete = dwc3_msm_req_complete_func;
656
657 /*
658 * Configure the DBM endpoint
659 */
660 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
661 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
662 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
663 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
664 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
665
666 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
667 disable_wb, internal_mem, ioc);
668 if (ret < 0) {
669 dev_err(mdwc->dev,
670 "error %d after calling dbm_ep_config\n", ret);
671 return ret;
672 }
673
674 dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
675 __func__, request, ep->name, request->length);
676 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
677 dbm_event_buffer_config(mdwc->dbm,
678 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
679 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
680 DWC3_GEVNTSIZ_SIZE(size));
681
682 /*
683 * We must obtain the lock of the dwc3 core driver,
684 * including disabling interrupts, so we will be sure
685 * that we are the only ones that configure the HW device
686 * core and ensure that we queuing the request will finish
687 * as soon as possible so we will release back the lock.
688 */
689 spin_lock_irqsave(&dwc->lock, flags);
690 if (!dep->endpoint.desc) {
691 dev_err(mdwc->dev,
692 "%s: trying to queue request %p to disabled ep %s\n",
693 __func__, request, ep->name);
694 ret = -EPERM;
695 goto err;
696 }
697
698 if (dep->number == 0 || dep->number == 1) {
699 dev_err(mdwc->dev,
700 "%s: trying to queue dbm request %p to control ep %s\n",
701 __func__, request, ep->name);
702 ret = -EPERM;
703 goto err;
704 }
705
706
Mayank Rana83ad5822016-08-09 14:17:22 -0700707 if (dep->trb_dequeue != dep->trb_enqueue ||
708 !list_empty(&dep->pending_list)
709 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700710 dev_err(mdwc->dev,
711 "%s: trying to queue dbm request %p tp ep %s\n",
712 __func__, request, ep->name);
713 ret = -EPERM;
714 goto err;
715 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700716 dep->trb_dequeue = 0;
717 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700718 }
719
720 ret = __dwc3_msm_ep_queue(dep, req);
721 if (ret < 0) {
722 dev_err(mdwc->dev,
723 "error %d after calling __dwc3_msm_ep_queue\n", ret);
724 goto err;
725 }
726
727 spin_unlock_irqrestore(&dwc->lock, flags);
728 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
729 dbm_set_speed(mdwc->dbm, (u8)superspeed);
730
731 return 0;
732
733err:
734 spin_unlock_irqrestore(&dwc->lock, flags);
735 kfree(req_complete);
736 return ret;
737}
738
739/*
740* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
741*
742* @usb_ep - pointer to usb_ep instance.
743*
744* @return int - XferRscIndex
745*/
746static inline int gsi_get_xfer_index(struct usb_ep *ep)
747{
748 struct dwc3_ep *dep = to_dwc3_ep(ep);
749
750 return dep->resource_index;
751}
752
753/*
754* Fills up the GSI channel information needed in call to IPA driver
755* for GSI channel creation.
756*
757* @usb_ep - pointer to usb_ep instance.
758* @ch_info - output parameter with requested channel info
759*/
760static void gsi_get_channel_info(struct usb_ep *ep,
761 struct gsi_channel_info *ch_info)
762{
763 struct dwc3_ep *dep = to_dwc3_ep(ep);
764 int last_trb_index = 0;
765 struct dwc3 *dwc = dep->dwc;
766 struct usb_gsi_request *request = ch_info->ch_req;
767
768 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
769 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Rana83ad5822016-08-09 14:17:22 -0700770 DWC3_DEPCMD);
Mayank Rana511f3b22016-08-02 12:00:11 -0700771 ch_info->depcmd_hi_addr = 0;
772
773 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
774 &dep->trb_pool[0]);
775 /* Convert to multipled of 1KB */
776 ch_info->const_buffer_size = request->buf_len/1024;
777
778 /* IN direction */
779 if (dep->direction) {
780 /*
781 * Multiply by size of each TRB for xfer_ring_len in bytes.
782 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
783 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
784 */
785 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
786 last_trb_index = 2 * request->num_bufs + 2;
787 } else { /* OUT direction */
788 /*
789 * Multiply by size of each TRB for xfer_ring_len in bytes.
790 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
791 * LINK TRB.
792 */
793 ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
794 last_trb_index = request->num_bufs + 1;
795 }
796
797 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
798 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
799 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
800 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
801 DWC3_GEVNTCOUNT(ep->ep_intr_num));
802 ch_info->gevntcount_hi_addr = 0;
803
804 dev_dbg(dwc->dev,
805 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
806 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
807 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
808}
809
810/*
811* Perform StartXfer on GSI EP. Stores XferRscIndex.
812*
813* @usb_ep - pointer to usb_ep instance.
814*
815* @return int - 0 on success
816*/
817static int gsi_startxfer_for_ep(struct usb_ep *ep)
818{
819 int ret;
820 struct dwc3_gadget_ep_cmd_params params;
821 u32 cmd;
822 struct dwc3_ep *dep = to_dwc3_ep(ep);
823 struct dwc3 *dwc = dep->dwc;
824
825 memset(&params, 0, sizeof(params));
826 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
827 params.param0 |= (ep->ep_intr_num << 16);
828 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
829 &dep->trb_pool[0]));
830 cmd = DWC3_DEPCMD_STARTTRANSFER;
831 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700832 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700833
834 if (ret < 0)
835 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700836 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700837 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
838 return ret;
839}
840
841/*
842* Store Ring Base and Doorbell Address for GSI EP
843* for GSI channel creation.
844*
845* @usb_ep - pointer to usb_ep instance.
846* @dbl_addr - Doorbell address obtained from IPA driver
847*/
848static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
849{
850 struct dwc3_ep *dep = to_dwc3_ep(ep);
851 struct dwc3 *dwc = dep->dwc;
852 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
853 int n = ep->ep_intr_num - 1;
854
855 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
856 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
857 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
858
859 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
860 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
861 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
862 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
863}
864
865/*
866* Rings Doorbell for IN GSI Channel
867*
868* @usb_ep - pointer to usb_ep instance.
869* @request - pointer to GSI request. This is used to pass in the
870* address of the GSI doorbell obtained from IPA driver
871*/
872static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
873{
874 void __iomem *gsi_dbl_address_lsb;
875 void __iomem *gsi_dbl_address_msb;
876 dma_addr_t offset;
877 u64 dbl_addr = *((u64 *)request->buf_base_addr);
878 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
879 u32 dbl_hi_addr = (dbl_addr >> 32);
880 u32 num_trbs = (request->num_bufs * 2 + 2);
881 struct dwc3_ep *dep = to_dwc3_ep(ep);
882 struct dwc3 *dwc = dep->dwc;
883 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
884
885 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
886 dbl_lo_addr, sizeof(u32));
887 if (!gsi_dbl_address_lsb)
888 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
889
890 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
891 dbl_hi_addr, sizeof(u32));
892 if (!gsi_dbl_address_msb)
893 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
894
895 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
896 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
897 &offset, gsi_dbl_address_lsb, dbl_lo_addr);
898
899 writel_relaxed(offset, gsi_dbl_address_lsb);
900 writel_relaxed(0, gsi_dbl_address_msb);
901}
902
903/*
904* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
905*
906* @usb_ep - pointer to usb_ep instance.
907* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
908*
909* @return int - 0 on success
910*/
911static int gsi_updatexfer_for_ep(struct usb_ep *ep,
912 struct usb_gsi_request *request)
913{
914 int i;
915 int ret;
916 u32 cmd;
917 int num_trbs = request->num_bufs + 1;
918 struct dwc3_trb *trb;
919 struct dwc3_gadget_ep_cmd_params params;
920 struct dwc3_ep *dep = to_dwc3_ep(ep);
921 struct dwc3 *dwc = dep->dwc;
922
923 for (i = 0; i < num_trbs - 1; i++) {
924 trb = &dep->trb_pool[i];
925 trb->ctrl |= DWC3_TRB_CTRL_HWO;
926 }
927
928 memset(&params, 0, sizeof(params));
929 cmd = DWC3_DEPCMD_UPDATETRANSFER;
930 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -0700931 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700932 dep->flags |= DWC3_EP_BUSY;
933 if (ret < 0)
934 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
935 return ret;
936}
937
938/*
939* Perform EndXfer on particular GSI EP.
940*
941* @usb_ep - pointer to usb_ep instance.
942*/
943static void gsi_endxfer_for_ep(struct usb_ep *ep)
944{
945 struct dwc3_ep *dep = to_dwc3_ep(ep);
946 struct dwc3 *dwc = dep->dwc;
947
948 dwc3_stop_active_transfer(dwc, dep->number, true);
949}
950
951/*
952* Allocates and configures TRBs for GSI EPs.
953*
954* @usb_ep - pointer to usb_ep instance.
955* @request - pointer to GSI request.
956*
957* @return int - 0 on success
958*/
959static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
960{
961 int i = 0;
962 dma_addr_t buffer_addr = req->dma;
963 struct dwc3_ep *dep = to_dwc3_ep(ep);
964 struct dwc3 *dwc = dep->dwc;
965 struct dwc3_trb *trb;
966 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
967 : (req->num_bufs + 1);
968
969 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
970 num_trbs * sizeof(struct dwc3_trb),
971 num_trbs * sizeof(struct dwc3_trb), 0);
972 if (!dep->trb_dma_pool) {
973 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
974 dep->name);
975 return -ENOMEM;
976 }
977
978 dep->num_trbs = num_trbs;
979
980 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
981 GFP_KERNEL, &dep->trb_pool_dma);
982 if (!dep->trb_pool) {
983 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
984 dep->name);
985 return -ENOMEM;
986 }
987
988 /* IN direction */
989 if (dep->direction) {
990 for (i = 0; i < num_trbs ; i++) {
991 trb = &dep->trb_pool[i];
992 memset(trb, 0, sizeof(*trb));
993 /* Set up first n+1 TRBs for ZLPs */
994 if (i < (req->num_bufs + 1)) {
995 trb->bpl = 0;
996 trb->bph = 0;
997 trb->size = 0;
998 trb->ctrl = DWC3_TRBCTL_NORMAL
999 | DWC3_TRB_CTRL_IOC;
1000 continue;
1001 }
1002
1003 /* Setup n TRBs pointing to valid buffers */
1004 trb->bpl = lower_32_bits(buffer_addr);
1005 trb->bph = 0;
1006 trb->size = 0;
1007 trb->ctrl = DWC3_TRBCTL_NORMAL
1008 | DWC3_TRB_CTRL_IOC;
1009 buffer_addr += req->buf_len;
1010
1011 /* Set up the Link TRB at the end */
1012 if (i == (num_trbs - 1)) {
1013 trb->bpl = dwc3_trb_dma_offset(dep,
1014 &dep->trb_pool[0]);
1015 trb->bph = (1 << 23) | (1 << 21)
1016 | (ep->ep_intr_num << 16);
1017 trb->size = 0;
1018 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1019 | DWC3_TRB_CTRL_HWO;
1020 }
1021 }
1022 } else { /* OUT direction */
1023
1024 for (i = 0; i < num_trbs ; i++) {
1025
1026 trb = &dep->trb_pool[i];
1027 memset(trb, 0, sizeof(*trb));
1028 trb->bpl = lower_32_bits(buffer_addr);
1029 trb->bph = 0;
1030 trb->size = req->buf_len;
1031 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
1032 | DWC3_TRB_CTRL_CSP
1033 | DWC3_TRB_CTRL_ISP_IMI;
1034 buffer_addr += req->buf_len;
1035
1036 /* Set up the Link TRB at the end */
1037 if (i == (num_trbs - 1)) {
1038 trb->bpl = dwc3_trb_dma_offset(dep,
1039 &dep->trb_pool[0]);
1040 trb->bph = (1 << 23) | (1 << 21)
1041 | (ep->ep_intr_num << 16);
1042 trb->size = 0;
1043 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1044 | DWC3_TRB_CTRL_HWO;
1045 }
1046 }
1047 }
1048 return 0;
1049}
1050
1051/*
1052* Frees TRBs for GSI EPs.
1053*
1054* @usb_ep - pointer to usb_ep instance.
1055*
1056*/
1057static void gsi_free_trbs(struct usb_ep *ep)
1058{
1059 struct dwc3_ep *dep = to_dwc3_ep(ep);
1060
1061 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1062 return;
1063
1064 /* Free TRBs and TRB pool for EP */
1065 if (dep->trb_dma_pool) {
1066 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1067 dep->trb_pool_dma);
1068 dma_pool_destroy(dep->trb_dma_pool);
1069 dep->trb_pool = NULL;
1070 dep->trb_pool_dma = 0;
1071 dep->trb_dma_pool = NULL;
1072 }
1073}
/*
 * Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
 *
 * Issues DEPCFG (and, on first enable, DEPXFERCFG) commands so that the
 * endpoint's events are steered to the GSI interrupter, then marks the
 * EP enabled in DALEPENA.
 *
 * @ep - pointer to usb_ep instance.
 * @request - pointer to GSI request (currently unused here).
 */
static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_gadget_ep_cmd_params params;
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	u32 reg;

	memset(&params, 0x00, sizeof(params));

	/* Configure GSI EP: transfer type and max packet from descriptor */
	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
					| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	/* Set EP number */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/* Set interrupter number for GSI endpoints */
	params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);

	/* Enable XferInProgress and XferComplete Interrupts */
	params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
	params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
	params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	/* Remove FIFO Number for GSI EP*/
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	params.param0 |= DWC3_DEPCFG_ACTION_INIT;

	dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
	params.param0, params.param1, params.param2, dep->name);

	dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);

	/* Set XferRsc Index for GSI EP (only on first enable) */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		memset(&params, 0x00, sizeof(params));
		params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
		dwc3_send_gadget_ep_cmd(dep,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);

		/* Mirror what dwc3_gadget_ep_enable() would record */
		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;
		/* Turn the EP on in the controller's active-EP bitmap */
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
	}

}
1151
/*
 * Enables USB wrapper for GSI
 *
 * @ep - pointer to usb_ep instance.
 */
static void gsi_enable(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* Ungate the GSI clock before touching anything else */
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
	/* Pulse the restart-doorbell-pointer bit: set, then clear */
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
	dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
	/* Finally switch the GSI wrapper on */
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
}
1173
/*
 * Block or allow doorbell towards GSI
 *
 * @ep - pointer to usb_ep instance.
 * @block_db - true to block doorbell writes from reaching GSI,
 *             false to allow them.
 *             (The old comment described a usb_gsi_request parameter;
 *             the actual parameter is this bool.)
 */
static void gsi_set_clear_dbell(struct usb_ep *ep,
					bool block_db)
{

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* BLOCK_GSI_WR_GO gates doorbell writes in the wrapper */
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
}
1192
/*
 * Performs necessary checks before stopping GSI channels
 *
 * @ep - pointer to usb_ep instance to access DWC3 regs
 * @f_suspend - true when handling Function Suspend; skips the U3 link
 *              state check in that case.
 *
 * @return bool - true if it is safe to suspend the GSI channels.
 */
static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
{
	u32 timeout = 1500;
	u32 reg = 0;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/*
	 * Wait for the GSI write-control FSM to go idle.
	 * NOTE(review): tight busy-poll with no delay between register
	 * reads, bounded only by 1500 iterations — confirm this is the
	 * intended wait granularity.
	 */
	while (dwc3_msm_read_reg_field(mdwc->base,
		GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
		if (!timeout--) {
			dev_err(mdwc->dev,
			"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
			return false;
		}
	}
	/* Check for U3 only if we are not handling Function Suspend */
	if (!f_suspend) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
			dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
			return false;
		}
	}

	return true;
}
1225
1226
1227/**
1228* Performs GSI operations or GSI EP related operations.
1229*
1230* @usb_ep - pointer to usb_ep instance.
1231* @op_data - pointer to opcode related data.
1232* @op - GSI related or GSI EP related op code.
1233*
1234* @return int - 0 on success, negative on error.
1235* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1236*/
1237static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1238 void *op_data, enum gsi_ep_op op)
1239{
1240 u32 ret = 0;
1241 struct dwc3_ep *dep = to_dwc3_ep(ep);
1242 struct dwc3 *dwc = dep->dwc;
1243 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1244 struct usb_gsi_request *request;
1245 struct gsi_channel_info *ch_info;
1246 bool block_db, f_suspend;
1247
1248 switch (op) {
1249 case GSI_EP_OP_PREPARE_TRBS:
1250 request = (struct usb_gsi_request *)op_data;
1251 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1252 ret = gsi_prepare_trbs(ep, request);
1253 break;
1254 case GSI_EP_OP_FREE_TRBS:
1255 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1256 gsi_free_trbs(ep);
1257 break;
1258 case GSI_EP_OP_CONFIG:
1259 request = (struct usb_gsi_request *)op_data;
1260 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
1261 gsi_configure_ep(ep, request);
1262 break;
1263 case GSI_EP_OP_STARTXFER:
1264 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
1265 ret = gsi_startxfer_for_ep(ep);
1266 break;
1267 case GSI_EP_OP_GET_XFER_IDX:
1268 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1269 ret = gsi_get_xfer_index(ep);
1270 break;
1271 case GSI_EP_OP_STORE_DBL_INFO:
1272 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1273 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1274 break;
1275 case GSI_EP_OP_ENABLE_GSI:
1276 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1277 gsi_enable(ep);
1278 break;
1279 case GSI_EP_OP_GET_CH_INFO:
1280 ch_info = (struct gsi_channel_info *)op_data;
1281 gsi_get_channel_info(ep, ch_info);
1282 break;
1283 case GSI_EP_OP_RING_IN_DB:
1284 request = (struct usb_gsi_request *)op_data;
1285 dev_dbg(mdwc->dev, "RING IN EP DB\n");
1286 gsi_ring_in_db(ep, request);
1287 break;
1288 case GSI_EP_OP_UPDATEXFER:
1289 request = (struct usb_gsi_request *)op_data;
1290 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
1291 ret = gsi_updatexfer_for_ep(ep, request);
1292 break;
1293 case GSI_EP_OP_ENDXFER:
1294 request = (struct usb_gsi_request *)op_data;
1295 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
1296 gsi_endxfer_for_ep(ep);
1297 break;
1298 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1299 block_db = *((bool *)op_data);
1300 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1301 block_db);
1302 gsi_set_clear_dbell(ep, block_db);
1303 break;
1304 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1305 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1306 f_suspend = *((bool *)op_data);
1307 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1308 break;
1309 case GSI_EP_OP_DISABLE:
1310 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1311 ret = ep->ops->disable(ep);
1312 break;
1313 default:
1314 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1315 }
1316
1317 return ret;
1318}
1319
1320/**
1321 * Configure MSM endpoint.
1322 * This function do specific configurations
1323 * to an endpoint which need specific implementaion
1324 * in the MSM architecture.
1325 *
1326 * This function should be called by usb function/class
1327 * layer which need a support from the specific MSM HW
1328 * which wrap the USB3 core. (like GSI or DBM specific endpoints)
1329 *
1330 * @ep - a pointer to some usb_ep instance
1331 *
1332 * @return int - 0 on success, negetive on error.
1333 */
1334int msm_ep_config(struct usb_ep *ep)
1335{
1336 struct dwc3_ep *dep = to_dwc3_ep(ep);
1337 struct dwc3 *dwc = dep->dwc;
1338 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1339 struct usb_ep_ops *new_ep_ops;
1340
1341
1342 /* Save original ep ops for future restore*/
1343 if (mdwc->original_ep_ops[dep->number]) {
1344 dev_err(mdwc->dev,
1345 "ep [%s,%d] already configured as msm endpoint\n",
1346 ep->name, dep->number);
1347 return -EPERM;
1348 }
1349 mdwc->original_ep_ops[dep->number] = ep->ops;
1350
1351 /* Set new usb ops as we like */
1352 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1353 if (!new_ep_ops)
1354 return -ENOMEM;
1355
1356 (*new_ep_ops) = (*ep->ops);
1357 new_ep_ops->queue = dwc3_msm_ep_queue;
1358 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1359 ep->ops = new_ep_ops;
1360
1361 /*
1362 * Do HERE more usb endpoint configurations
1363 * which are specific to MSM.
1364 */
1365
1366 return 0;
1367}
1368EXPORT_SYMBOL(msm_ep_config);
1369
1370/**
1371 * Un-configure MSM endpoint.
1372 * Tear down configurations done in the
1373 * dwc3_msm_ep_config function.
1374 *
1375 * @ep - a pointer to some usb_ep instance
1376 *
1377 * @return int - 0 on success, negative on error.
1378 */
1379int msm_ep_unconfig(struct usb_ep *ep)
1380{
1381 struct dwc3_ep *dep = to_dwc3_ep(ep);
1382 struct dwc3 *dwc = dep->dwc;
1383 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1384 struct usb_ep_ops *old_ep_ops;
1385
1386 /* Restore original ep ops */
1387 if (!mdwc->original_ep_ops[dep->number]) {
1388 dev_err(mdwc->dev,
1389 "ep [%s,%d] was not configured as msm endpoint\n",
1390 ep->name, dep->number);
1391 return -EINVAL;
1392 }
1393 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1394 ep->ops = mdwc->original_ep_ops[dep->number];
1395 mdwc->original_ep_ops[dep->number] = NULL;
1396 kfree(old_ep_ops);
1397
1398 /*
1399 * Do HERE more usb endpoint un-configurations
1400 * which are specific to MSM.
1401 */
1402
1403 return 0;
1404}
1405EXPORT_SYMBOL(msm_ep_unconfig);
1406#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1407
1408static void dwc3_resume_work(struct work_struct *w);
1409
/*
 * Worker that restarts an active USB session (scheduled e.g. after a
 * controller error event). It drives a disconnect via resume_work, waits
 * up to ~1s (50 * 20ms) for runtime suspend, then re-drives a connect if
 * the cable is still present. in_restart guards against concurrent VBUS
 * handling for the duration.
 */
static void dwc3_restart_usb_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
						restart_usb_work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	unsigned int timeout = 50;

	dev_dbg(mdwc->dev, "%s\n", __func__);

	/* Restart only makes sense when awake and in dual-role mode */
	if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
		dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
		return;
	}

	/* guard against concurrent VBUS handling */
	mdwc->in_restart = true;

	if (!mdwc->vbus_active) {
		dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
		dwc->err_evt_seen = false;
		mdwc->in_restart = false;
		return;
	}

	/* Reset active USB connection */
	dwc3_resume_work(&mdwc->resume_work);

	/* Make sure disconnect is processed before sending connect */
	while (--timeout && !pm_runtime_suspended(mdwc->dev))
		msleep(20);

	if (!timeout) {
		dev_dbg(mdwc->dev,
			"Not in LPM after disconnect, forcing suspend...\n");
		pm_runtime_suspend(mdwc->dev);
	}

	/* Force reconnect only if cable is still connected */
	if (mdwc->vbus_active) {
		mdwc->in_restart = false;
		dwc3_resume_work(&mdwc->resume_work);
	}

	dwc->err_evt_seen = false;
	flush_delayed_work(&mdwc->sm_work);
}
1456
1457/*
1458 * Check whether the DWC3 requires resetting the ep
1459 * after going to Low Power Mode (lpm)
1460 */
1461bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1462{
1463 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1464 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1465
1466 return dbm_reset_ep_after_lpm(mdwc->dbm);
1467}
1468EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1469
1470/*
1471 * Config Global Distributed Switch Controller (GDSC)
1472 * to support controller power collapse
1473 */
1474static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1475{
1476 int ret;
1477
1478 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1479 return -EPERM;
1480
1481 if (on) {
1482 ret = regulator_enable(mdwc->dwc3_gdsc);
1483 if (ret) {
1484 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1485 return ret;
1486 }
1487 } else {
1488 ret = regulator_disable(mdwc->dwc3_gdsc);
1489 if (ret) {
1490 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1491 return ret;
1492 }
1493 }
1494
1495 return ret;
1496}
1497
/*
 * Assert or deassert the asynchronous block reset on the DWC3 core.
 *
 * On assert: gate all core clocks first, then pull the reset line.
 * On deassert: release reset, wait 200ns for it to propagate, then
 * re-enable the clocks in the reverse order they were disabled
 * (iface before core — core_clk's FSM depends on iface_clk).
 * The power event IRQ is disabled across the whole sequence.
 */
static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
{
	int ret = 0;

	if (assert) {
		disable_irq(mdwc->pwr_event_irq);
		/* Using asynchronous block reset to the hardware */
		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
		clk_disable_unprepare(mdwc->utmi_clk);
		clk_disable_unprepare(mdwc->sleep_clk);
		clk_disable_unprepare(mdwc->core_clk);
		clk_disable_unprepare(mdwc->iface_clk);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
	} else {
		dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
		/* short settle time after releasing reset */
		ndelay(200);
		clk_prepare_enable(mdwc->iface_clk);
		clk_prepare_enable(mdwc->core_clk);
		clk_prepare_enable(mdwc->sleep_clk);
		clk_prepare_enable(mdwc->utmi_clk);
		enable_irq(mdwc->pwr_event_irq);
	}

	return ret;
}
1528
/*
 * Program the controller's reference-clock period (GUCTL.REFCLKPER) and
 * frame-length adjustment (GFLADJ) to match the UTMI clock rate.
 * The constants come from the SNPS databook (Table 6-55); GFLADJ only
 * exists on cores >= 2.50a, so it is left zero (and not written) on
 * older revisions.
 */
static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
{
	u32 guctl, gfladj = 0;

	guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
	guctl &= ~DWC3_GUCTL_REFCLKPER;

	/* GFLADJ register is used starting with revision 2.50a */
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
		gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
		gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
	}

	/* Refer to SNPS Databook Table 6-55 for calculations used */
	switch (mdwc->utmi_clk_rate) {
	case 19200000:
		guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	case 24000000:
		guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	default:
		dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
				mdwc->utmi_clk_rate);
		break;
	}

	dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
	if (gfladj)
		dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
}
1570
/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
{
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
		/* On older cores set XHCI_REV bit to specify revision 1.0 */
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
					 BIT(2), 1);

	/*
	 * Enable master clock for RAMs to allow BAM to access RAMs when
	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
	 * are seen where RAM clocks get turned OFF in SS mode
	 */
	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);

}
1588
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001589static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1590{
1591 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1592 vbus_draw_work);
1593 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1594
1595 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1596}
1597
Mayank Rana511f3b22016-08-02 12:00:11 -07001598static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1599{
1600 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1601 u32 reg;
1602
1603 if (dwc->revision < DWC3_REVISION_230A)
1604 return;
1605
1606 switch (event) {
1607 case DWC3_CONTROLLER_ERROR_EVENT:
1608 dev_info(mdwc->dev,
1609 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1610 dwc->irq_cnt);
1611
1612 dwc3_gadget_disable_irq(dwc);
1613
1614 /* prevent core from generating interrupts until recovery */
1615 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1616 reg |= DWC3_GCTL_CORESOFTRESET;
1617 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1618
1619 /* restart USB which performs full reset and reconnect */
1620 schedule_work(&mdwc->restart_usb_work);
1621 break;
1622 case DWC3_CONTROLLER_RESET_EVENT:
1623 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1624 /* HS & SSPHYs get reset as part of core soft reset */
1625 dwc3_msm_qscratch_reg_init(mdwc);
1626 break;
1627 case DWC3_CONTROLLER_POST_RESET_EVENT:
1628 dev_dbg(mdwc->dev,
1629 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1630
1631 /*
1632 * Below sequence is used when controller is working without
1633 * having ssphy and only USB high speed is supported.
1634 */
1635 if (dwc->maximum_speed == USB_SPEED_HIGH) {
1636 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1637 dwc3_msm_read_reg(mdwc->base,
1638 QSCRATCH_GENERAL_CFG)
1639 | PIPE_UTMI_CLK_DIS);
1640
1641 usleep_range(2, 5);
1642
1643
1644 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1645 dwc3_msm_read_reg(mdwc->base,
1646 QSCRATCH_GENERAL_CFG)
1647 | PIPE_UTMI_CLK_SEL
1648 | PIPE3_PHYSTATUS_SW);
1649
1650 usleep_range(2, 5);
1651
1652 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1653 dwc3_msm_read_reg(mdwc->base,
1654 QSCRATCH_GENERAL_CFG)
1655 & ~PIPE_UTMI_CLK_DIS);
1656 }
1657
1658 dwc3_msm_update_ref_clk(mdwc);
1659 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1660 break;
1661 case DWC3_CONTROLLER_CONNDONE_EVENT:
1662 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1663 /*
1664 * Add power event if the dbm indicates coming out of L1 by
1665 * interrupt
1666 */
1667 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1668 dwc3_msm_write_reg_field(mdwc->base,
1669 PWR_EVNT_IRQ_MASK_REG,
1670 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1671
1672 atomic_set(&dwc->in_lpm, 0);
1673 break;
1674 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1675 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1676 if (dwc->enable_bus_suspend) {
1677 mdwc->suspend = dwc->b_suspend;
1678 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1679 }
1680 break;
1681 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1682 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001683 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001684 break;
1685 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1686 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
1687 dwc3_restart_usb_work(&mdwc->restart_usb_work);
1688 break;
1689 default:
1690 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1691 break;
1692 }
1693}
1694
/*
 * Perform a block reset of the wrapper: optionally pulse the link/core
 * reset (assert, 1ms, deassert, 10ms settle), then soft-reset and
 * re-enable the DBM if one is present.
 */
static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
{
	int ret = 0;

	if (core_reset) {
		ret = dwc3_msm_link_clk_reset(mdwc, 1);
		if (ret)
			return;

		usleep_range(1000, 1200);
		ret = dwc3_msm_link_clk_reset(mdwc, 0);
		if (ret)
			return;

		usleep_range(10000, 12000);
	}

	if (mdwc->dbm) {
		/* Reset the DBM */
		dbm_soft_reset(mdwc->dbm, 1);
		usleep_range(1000, 1200);
		dbm_soft_reset(mdwc->dbm, 0);

		/*enable DBM*/
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
			DBM_EN_MASK, 0x1);
		dbm_enable(mdwc->dbm);
	}
}
1724
/*
 * Re-initialize the controller after a power collapse (power-on-reset
 * path): program the AHB2PHY bridge wait states, run one-time core
 * pre-init on first invocation, then redo core init and event buffers.
 */
static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	u32 val;

	/* Configure AHB2PHY for one wait state read/write */
	if (mdwc->ahb2phy_base) {
		clk_prepare_enable(mdwc->cfg_ahb_clk);
		val = readl_relaxed(mdwc->ahb2phy_base +
				PERIPH_SS_AHB2PHY_TOP_CFG);
		if (val != ONE_READ_WRITE_WAIT) {
			writel_relaxed(ONE_READ_WRITE_WAIT,
				mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
			/* complete above write before configuring USB PHY. */
			mb();
		}
		clk_disable_unprepare(mdwc->cfg_ahb_clk);
	}

	/* one-time core pre-initialization on the very first POR */
	if (!mdwc->init) {
		dwc3_core_pre_init(dwc);
		mdwc->init = true;
	}

	dwc3_core_init(dwc);
	/* Re-configure event buffers */
	dwc3_event_buffers_setup(dwc);
}
1753
/*
 * Prepare the controller for low power mode: verify the SS link is in P3
 * when a SuperSpeed session is active, then put the HS PHY into suspend
 * and wait (up to 5ms) for the link to reach L2.
 *
 * Returns 0 on success, -EBUSY if the LPM sequence must be aborted.
 */
static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
{
	unsigned long timeout;
	u32 reg = 0;

	if ((mdwc->in_host_mode || mdwc->vbus_active)
			&& dwc3_msm_is_superspeed(mdwc)) {
		if (!atomic_read(&mdwc->in_p3)) {
			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
			return -EBUSY;
		}
	}

	/* Clear previous L2 events */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);

	/* Prepare HSPHY for suspend */
	reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);

	/* Wait for PHY to go into L2 */
	timeout = jiffies + msecs_to_jiffies(5);
	while (!time_after(jiffies, timeout)) {
		reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
		if (reg & PWR_EVNT_LPM_IN_L2_MASK)
			break;
	}
	/* not fatal: log and continue with the LPM sequence */
	if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
		dev_err(mdwc->dev, "could not transition HS PHY to L2\n");

	/* Clear L2 event bit */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK);

	return 0;
}
1792
1793static void dwc3_msm_bus_vote_w(struct work_struct *w)
1794{
1795 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1796 int ret;
1797
1798 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1799 mdwc->bus_vote);
1800 if (ret)
1801 dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
1802}
1803
/*
 * Record the negotiated link speed in the HS PHY's flags before suspend.
 * Host mode: scan the enabled xHCI root ports; device mode: use the
 * gadget's negotiated speed. Sets PHY_HSFS_MODE for HS/FS, PHY_LS_MODE
 * for LS, clears both otherwise.
 */
static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	int i, num_ports;
	u32 reg;

	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
	if (mdwc->in_host_mode) {
		reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
		num_ports = HCS_MAX_PORTS(reg);
		for (i = 0; i < num_ports; i++) {
			/* PORTSC registers are 0x10 apart per port */
			reg = dwc3_msm_read_reg(mdwc->base,
					USB3_PORTSC + i*0x10);
			if (reg & PORT_PE) {
				if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
					mdwc->hs_phy->flags |= PHY_HSFS_MODE;
				else if (DEV_LOWSPEED(reg))
					mdwc->hs_phy->flags |= PHY_LS_MODE;
			}
		}
	} else {
		if (dwc->gadget.speed == USB_SPEED_HIGH ||
			dwc->gadget.speed == USB_SPEED_FULL)
			mdwc->hs_phy->flags |= PHY_HSFS_MODE;
		else if (dwc->gadget.speed == USB_SPEED_LOW)
			mdwc->hs_phy->flags |= PHY_LS_MODE;
	}
}
1832
1833
/*
 * Put the controller into low power mode: after bail-out checks (pending
 * events, racing OTG state machine, unconfigured bus suspend), suspend
 * the PHYs, gate the clocks in dependency order, optionally power
 * collapse the core, drop the bus vote, and arm the PHY wakeup IRQs.
 *
 * Returns 0 on success, -EBUSY when suspend must be deferred.
 */
static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
{
	int ret;
	bool can_suspend_ssphy;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct dwc3_event_buffer *evt;

	if (atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
		return 0;
	}

	/* Device mode: refuse to suspend with unprocessed events queued */
	if (!mdwc->in_host_mode) {
		evt = dwc->ev_buf;
		if ((evt->flags & DWC3_EVENT_PENDING)) {
			dev_dbg(mdwc->dev,
				"%s: %d device events pending, abort suspend\n",
				__func__, evt->count / 4);
			return -EBUSY;
		}
	}

	if (!mdwc->vbus_active && dwc->is_drd &&
		mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
		/*
		 * In some cases, the pm_runtime_suspend may be called by
		 * usb_bam when there is pending lpm flag. However, if this is
		 * done when cable was disconnected and otg state has not
		 * yet changed to IDLE, then it means OTG state machine
		 * is running and we race against it. So cancel LPM for now,
		 * and OTG state machine will go for LPM later, after completing
		 * transition to IDLE state.
		 */
		dev_dbg(mdwc->dev,
			"%s: cable disconnected while not in idle otg state\n",
			__func__);
		return -EBUSY;
	}

	/*
	 * Check if device is not in CONFIGURED state
	 * then check controller state of L2 and break
	 * LPM sequence. Check this for device bus suspend case.
	 */
	if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
		(dwc->gadget.state != USB_STATE_CONFIGURED)) {
		pr_err("%s(): Trying to go in LPM with state:%d\n",
					__func__, dwc->gadget.state);
		pr_err("%s(): LPM is not performed.\n", __func__);
		return -EBUSY;
	}

	ret = dwc3_msm_prepare_suspend(mdwc);
	if (ret)
		return ret;

	/* Initialize variables here */
	can_suspend_ssphy = !(mdwc->in_host_mode &&
				dwc3_msm_is_host_superspeed(mdwc));

	/* Disable core irq */
	if (dwc->irq)
		disable_irq(dwc->irq);

	/* disable power event irq, hs and ss phy irq is used as wake up src */
	disable_irq(mdwc->pwr_event_irq);

	dwc3_set_phy_speed_flags(mdwc);
	/* Suspend HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 1);

	/* Suspend SS PHY */
	if (can_suspend_ssphy) {
		/* indicate phy about SS mode */
		if (dwc3_msm_is_superspeed(mdwc))
			mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
		usb_phy_set_suspend(mdwc->ss_phy, 1);
		mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
	}

	/* make sure above writes are completed before turning off clocks */
	wmb();

	/* Disable clocks */
	if (mdwc->bus_aggr_clk)
		clk_disable_unprepare(mdwc->bus_aggr_clk);
	clk_disable_unprepare(mdwc->utmi_clk);

	/* drop core clock to its minimal rate before gating it */
	clk_set_rate(mdwc->core_clk, 19200000);
	clk_disable_unprepare(mdwc->core_clk);
	/*
	 * Disable iface_clk only after core_clk as core_clk has FSM
	 * depedency on iface_clk. Hence iface_clk should be turned off
	 * after core_clk is turned off.
	 */
	clk_disable_unprepare(mdwc->iface_clk);
	/* USB PHY no more requires TCXO */
	clk_disable_unprepare(mdwc->xo_clk);

	/* Perform controller power collapse */
	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 0);
		clk_disable_unprepare(mdwc->sleep_clk);
	}

	/* Remove bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 0;
		schedule_work(&mdwc->bus_vote_w);
	}

	/*
	 * release wakeup source with timeout to defer system suspend to
	 * handle case where on USB cable disconnect, SUSPEND and DISCONNECT
	 * event is received.
	 */
	if (mdwc->lpm_to_suspend_delay) {
		dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
					mdwc->lpm_to_suspend_delay);
		pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
	} else {
		pm_relax(mdwc->dev);
	}

	atomic_set(&dwc->in_lpm, 1);

	/*
	 * with DCP or during cable disconnect, we dont require wakeup
	 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
	 * case of host bus suspend and device bus suspend.
	 */
	if (mdwc->vbus_active || mdwc->in_host_mode) {
		enable_irq_wake(mdwc->hs_phy_irq);
		enable_irq(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			enable_irq_wake(mdwc->ss_phy_irq);
			enable_irq(mdwc->ss_phy_irq);
		}
		/*
		 * Enable power event irq during bus suspend in host mode for
		 * mapping MPM pin for DP so that wakeup can happen in system
		 * suspend.
		 */
		if (mdwc->in_host_mode) {
			enable_irq(mdwc->pwr_event_irq);
			enable_irq_wake(mdwc->pwr_event_irq);
		}
		mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 in low power mode\n");
	return 0;
}
1989
/**
 * dwc3_msm_resume - bring the DWC3 controller out of low power mode
 * @mdwc: driver context
 *
 * Reverses dwc3_msm_suspend(): re-votes for bus bandwidth and the TCXO
 * buffer, restores the controller from power collapse (GDSC + core reset)
 * when MDWC3_POWER_COLLAPSE is set, re-enables clocks in the required
 * order, wakes the SS/HS PHYs, drops the wakeup-capable IRQ configuration
 * and re-arms the power event IRQ.  Always returns 0.
 */
static int dwc3_msm_resume(struct dwc3_msm *mdwc)
{
	int ret;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);

	/* Nothing to do if we never entered LPM (or already resumed) */
	if (!atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
		return 0;
	}

	/* Hold off system suspend while the controller is active */
	pm_stay_awake(mdwc->dev);

	/* Enable bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 1;
		schedule_work(&mdwc->bus_vote_w);
	}

	/* Vote for TCXO while waking up USB HSPHY */
	ret = clk_prepare_enable(mdwc->xo_clk);
	if (ret)
		dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
						__func__, ret);

	/* Restore controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 1);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset assert failed\n",
					__func__);
		/* HW requires a short delay for reset to take place properly */
		usleep_range(1000, 1200);
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
					__func__);
		clk_prepare_enable(mdwc->sleep_clk);
	}

	/*
	 * Enable clocks
	 * Turned ON iface_clk before core_clk due to FSM depedency.
	 */
	clk_prepare_enable(mdwc->iface_clk);
	clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
	clk_prepare_enable(mdwc->core_clk);
	clk_prepare_enable(mdwc->utmi_clk);
	if (mdwc->bus_aggr_clk)
		clk_prepare_enable(mdwc->bus_aggr_clk);

	/* Resume SS PHY */
	if (mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
		/* Select the SS lane matching the Type-C plug orientation */
		mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
		if (mdwc->typec_orientation == ORIENTATION_CC1)
			mdwc->ss_phy->flags |= PHY_LANE_A;
		if (mdwc->typec_orientation == ORIENTATION_CC2)
			mdwc->ss_phy->flags |= PHY_LANE_B;
		usb_phy_set_suspend(mdwc->ss_phy, 0);
		mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
		mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
	}

	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
	/* Resume HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 0);

	/* Recover from controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		u32 tmp;

		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);

		dwc3_msm_power_collapse_por(mdwc);

		/* Get initial P3 status and enable IN_P3 event */
		tmp = dwc3_msm_read_reg_field(mdwc->base,
			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);

		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
	}

	atomic_set(&dwc->in_lpm, 0);

	/* Disable HSPHY auto suspend */
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
			dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
				~(DWC3_GUSB2PHYCFG_ENBLSLPM |
					DWC3_GUSB2PHYCFG_SUSPHY));

	/* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
	if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
		disable_irq_wake(mdwc->hs_phy_irq);
		disable_irq_nosync(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			disable_irq_wake(mdwc->ss_phy_irq);
			disable_irq_nosync(mdwc->ss_phy_irq);
		}
		if (mdwc->in_host_mode) {
			disable_irq_wake(mdwc->pwr_event_irq);
			disable_irq(mdwc->pwr_event_irq);
		}
		mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 exited from low power mode\n");

	/* enable power evt irq for IN P3 detection */
	enable_irq(mdwc->pwr_event_irq);

	/* Enable core irq */
	if (dwc->irq)
		enable_irq(dwc->irq);

	/*
	 * Handle other power events that could not have been handled during
	 * Low Power Mode
	 */
	dwc3_pwr_event_handler(mdwc);

	return 0;
}
2118
2119/**
2120 * dwc3_ext_event_notify - callback to handle events from external transceiver
2121 *
2122 * Returns 0 on success
2123 */
2124static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2125{
2126 /* Flush processing any pending events before handling new ones */
2127 flush_delayed_work(&mdwc->sm_work);
2128
2129 if (mdwc->id_state == DWC3_ID_FLOAT) {
2130 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2131 set_bit(ID, &mdwc->inputs);
2132 } else {
2133 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2134 clear_bit(ID, &mdwc->inputs);
2135 }
2136
2137 if (mdwc->vbus_active && !mdwc->in_restart) {
2138 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2139 set_bit(B_SESS_VLD, &mdwc->inputs);
2140 } else {
2141 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2142 clear_bit(B_SESS_VLD, &mdwc->inputs);
2143 }
2144
2145 if (mdwc->suspend) {
2146 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2147 set_bit(B_SUSPEND, &mdwc->inputs);
2148 } else {
2149 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2150 clear_bit(B_SUSPEND, &mdwc->inputs);
2151 }
2152
2153 schedule_delayed_work(&mdwc->sm_work, 0);
2154}
2155
2156static void dwc3_resume_work(struct work_struct *w)
2157{
2158 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002159
2160 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2161
2162 /*
2163 * exit LPM first to meet resume timeline from device side.
2164 * resume_pending flag would prevent calling
2165 * dwc3_msm_resume() in case we are here due to system
2166 * wide resume without usb cable connected. This flag is set
2167 * only in case of power event irq in lpm.
2168 */
2169 if (mdwc->resume_pending) {
2170 dwc3_msm_resume(mdwc);
2171 mdwc->resume_pending = false;
2172 }
2173
Mayank Rana83ad5822016-08-09 14:17:22 -07002174 if (atomic_read(&mdwc->pm_suspended))
Mayank Rana511f3b22016-08-02 12:00:11 -07002175 /* let pm resume kick in resume work later */
2176 return;
Mayank Rana511f3b22016-08-02 12:00:11 -07002177 dwc3_ext_event_notify(mdwc);
2178}
2179
2180static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2181{
2182 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2183 u32 irq_stat, irq_clear = 0;
2184
2185 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2186 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2187
2188 /* Check for P3 events */
2189 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2190 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2191 /* Can't tell if entered or exit P3, so check LINKSTATE */
2192 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2193 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2194 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2195 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2196
2197 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2198 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2199 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2200 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2201 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2202 atomic_set(&mdwc->in_p3, 0);
2203 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2204 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2205 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2206 atomic_set(&mdwc->in_p3, 1);
2207 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2208 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2209 }
2210
2211 /* Clear L2 exit */
2212 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2213 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2214 irq_stat |= PWR_EVNT_LPM_OUT_L2_MASK;
2215 }
2216
2217 /* Handle exit from L1 events */
2218 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2219 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2220 __func__);
2221 if (usb_gadget_wakeup(&dwc->gadget))
2222 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2223 __func__);
2224 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2225 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2226 }
2227
2228 /* Unhandled events */
2229 if (irq_stat)
2230 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2231 __func__, irq_stat);
2232
2233 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2234}
2235
2236static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2237{
2238 struct dwc3_msm *mdwc = _mdwc;
2239 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2240
2241 dev_dbg(mdwc->dev, "%s\n", __func__);
2242
2243 if (atomic_read(&dwc->in_lpm))
2244 dwc3_resume_work(&mdwc->resume_work);
2245 else
2246 dwc3_pwr_event_handler(mdwc);
2247
Mayank Rana511f3b22016-08-02 12:00:11 -07002248 return IRQ_HANDLED;
2249}
2250
2251static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2252{
2253 struct dwc3_msm *mdwc = data;
2254 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2255
2256 dwc->t_pwr_evt_irq = ktime_get();
2257 dev_dbg(mdwc->dev, "%s received\n", __func__);
2258 /*
2259 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to acertain
2260 * which interrupts have been triggered, as the clocks are disabled.
2261 * Resume controller by waking up pwr event irq thread.After re-enabling
2262 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2263 * all other power events.
2264 */
2265 if (atomic_read(&dwc->in_lpm)) {
2266 /* set this to call dwc3_msm_resume() */
2267 mdwc->resume_pending = true;
2268 return IRQ_WAKE_THREAD;
2269 }
2270
2271 dwc3_pwr_event_handler(mdwc);
2272 return IRQ_HANDLED;
2273}
2274
2275static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2276 unsigned long action, void *hcpu)
2277{
2278 uint32_t cpu = (uintptr_t)hcpu;
2279 struct dwc3_msm *mdwc =
2280 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2281
2282 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2283 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2284 cpu_to_affin, mdwc->irq_to_affin);
2285 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2286 }
2287
2288 return NOTIFY_OK;
2289}
2290
2291static void dwc3_otg_sm_work(struct work_struct *w);
2292
/**
 * dwc3_msm_get_clk_gdsc - acquire all clocks, resets and the GDSC regulator
 * @mdwc: driver context
 *
 * All handles are devm-managed, so nothing needs explicit release on error
 * or on driver removal.  Optional resources (GDSC, bus_aggr_clk) are set to
 * NULL when absent; cfg_ahb_clk is only looked up if listed in clock-names.
 *
 * Return: 0 on success, negative errno on failure to get a required
 * clock/reset (may be -EPROBE_DEFER).
 */
static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
{
	int ret;

	/* GDSC is optional: treat lookup failure as "not present" */
	mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
	if (IS_ERR(mdwc->dwc3_gdsc))
		mdwc->dwc3_gdsc = NULL;

	mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
	if (IS_ERR(mdwc->xo_clk)) {
		dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
								__func__);
		ret = PTR_ERR(mdwc->xo_clk);
		return ret;
	}
	clk_set_rate(mdwc->xo_clk, 19200000);

	mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
	if (IS_ERR(mdwc->iface_clk)) {
		dev_err(mdwc->dev, "failed to get iface_clk\n");
		ret = PTR_ERR(mdwc->iface_clk);
		return ret;
	}

	/*
	 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
	 * run at 125Mhz in SSUSB mode and >60MHZ for HSUSB mode.
	 * On newer platform it can run at 150MHz as well.
	 */
	mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
	if (IS_ERR(mdwc->core_clk)) {
		dev_err(mdwc->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdwc->core_clk);
		return ret;
	}

	mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
	if (IS_ERR(mdwc->core_reset)) {
		dev_err(mdwc->dev, "failed to get core_reset\n");
		return PTR_ERR(mdwc->core_reset);
	}

	/*
	 * Prefer the DT-provided core clock rate, rounded to what the clock
	 * tree supports; otherwise fall back to the max supported rate.
	 * NOTE(review): the (u32 *) cast assumes core_clk_rate's storage is
	 * at least 32-bit little-endian compatible — confirm field type.
	 */
	if (!of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
				(u32 *)&mdwc->core_clk_rate)) {
		mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
							mdwc->core_clk_rate);
	} else {
		/*
		 * Get Max supported clk frequency for USB Core CLK and request
		 * to set the same.
		 */
		mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
	}

	/* A bad rate is logged but not fatal; the clock keeps its default */
	if (IS_ERR_VALUE(mdwc->core_clk_rate)) {
		dev_err(mdwc->dev, "fail to get core clk max freq.\n");
	} else {
		dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
							mdwc->core_clk_rate);
		ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
		if (ret)
			dev_err(mdwc->dev, "fail to set core_clk freq:%d\n",
									ret);
	}

	mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
	if (IS_ERR(mdwc->sleep_clk)) {
		dev_err(mdwc->dev, "failed to get sleep_clk\n");
		ret = PTR_ERR(mdwc->sleep_clk);
		return ret;
	}

	clk_set_rate(mdwc->sleep_clk, 32000);
	mdwc->utmi_clk_rate = 19200000;
	mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
	if (IS_ERR(mdwc->utmi_clk)) {
		dev_err(mdwc->dev, "failed to get utmi_clk\n");
		ret = PTR_ERR(mdwc->utmi_clk);
		return ret;
	}

	clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
	/* bus_aggr_clk is optional */
	mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
	if (IS_ERR(mdwc->bus_aggr_clk))
		mdwc->bus_aggr_clk = NULL;

	/* cfg_ahb_clk is required only when declared in DT clock-names */
	if (of_property_match_string(mdwc->dev->of_node,
				"clock-names", "cfg_ahb_clk") >= 0) {
		mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
		if (IS_ERR(mdwc->cfg_ahb_clk)) {
			ret = PTR_ERR(mdwc->cfg_ahb_clk);
			mdwc->cfg_ahb_clk = NULL;
			if (ret != -EPROBE_DEFER)
				dev_err(mdwc->dev,
					"failed to get cfg_ahb_clk ret %d\n",
					ret);
			return ret;
		}
	}

	return 0;
}
2395
2396static int dwc3_msm_id_notifier(struct notifier_block *nb,
2397 unsigned long event, void *ptr)
2398{
2399 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
2400 struct extcon_dev *edev = ptr;
2401 enum dwc3_id_state id;
2402 int cc_state;
2403
2404 if (!edev) {
2405 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2406 goto done;
2407 }
2408
2409 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2410
2411 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2412
2413 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2414 if (cc_state < 0)
2415 mdwc->typec_orientation = ORIENTATION_NONE;
2416 else
2417 mdwc->typec_orientation =
2418 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2419
Mayank Rana511f3b22016-08-02 12:00:11 -07002420 if (mdwc->id_state != id) {
2421 mdwc->id_state = id;
Mayank Rana511f3b22016-08-02 12:00:11 -07002422 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2423 }
2424
2425done:
2426 return NOTIFY_DONE;
2427}
2428
2429static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2430 unsigned long event, void *ptr)
2431{
2432 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2433 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2434 struct extcon_dev *edev = ptr;
2435 int cc_state;
2436
2437 if (!edev) {
2438 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2439 goto done;
2440 }
2441
2442 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2443
2444 if (mdwc->vbus_active == event)
2445 return NOTIFY_DONE;
2446
2447 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2448 if (cc_state < 0)
2449 mdwc->typec_orientation = ORIENTATION_NONE;
2450 else
2451 mdwc->typec_orientation =
2452 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2453
Mayank Rana511f3b22016-08-02 12:00:11 -07002454 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002455 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002456 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002457done:
2458 return NOTIFY_DONE;
2459}
2460
/**
 * dwc3_msm_extcon_register - hook up extcon notifiers for VBUS and ID
 * @mdwc: driver context
 *
 * The "extcon" DT property may reference one or two extcon devices.  The
 * first supplies EXTCON_USB (VBUS); the second, if present, supplies
 * EXTCON_USB_HOST (ID).  When only one phandle is given, @edev still holds
 * the first device after the phandle-count check below, so the SAME extcon
 * device is registered for both cable types — this reuse is intentional.
 *
 * Return: 0 on success (including when no "extcon" property exists),
 * negative errno otherwise; the VBUS notifier is unregistered on late
 * failure.
 */
static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
{
	struct device_node *node = mdwc->dev->of_node;
	struct extcon_dev *edev;
	int ret = 0;

	/* extcon support is optional */
	if (!of_property_read_bool(node, "extcon"))
		return 0;

	edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
	/* -ENODEV is tolerated; any other error (e.g. deferral) is returned */
	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
		return PTR_ERR(edev);

	if (!IS_ERR(edev)) {
		mdwc->extcon_vbus = edev;
		mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB,
				&mdwc->vbus_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB\n");
			return ret;
		}
	}

	/* if a second phandle was provided, use it to get a separate edev */
	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
		edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
			ret = PTR_ERR(edev);
			goto err;
		}
	}

	/* edev is either the second device or (reused) the first one */
	if (!IS_ERR(edev)) {
		mdwc->extcon_id = edev;
		mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
				&mdwc->id_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
			goto err;
		}
	}

	return 0;
err:
	/* Undo the VBUS registration done above, if any */
	if (mdwc->extcon_vbus)
		extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
				&mdwc->vbus_nb);
	return ret;
}
2512
2513static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2514 char *buf)
2515{
2516 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2517
2518 if (mdwc->vbus_active)
2519 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2520 if (mdwc->id_state == DWC3_ID_GROUND)
2521 return snprintf(buf, PAGE_SIZE, "host\n");
2522
2523 return snprintf(buf, PAGE_SIZE, "none\n");
2524}
2525
2526static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2527 const char *buf, size_t count)
2528{
2529 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2530
2531 if (sysfs_streq(buf, "peripheral")) {
2532 mdwc->vbus_active = true;
2533 mdwc->id_state = DWC3_ID_FLOAT;
2534 } else if (sysfs_streq(buf, "host")) {
2535 mdwc->vbus_active = false;
2536 mdwc->id_state = DWC3_ID_GROUND;
2537 } else {
2538 mdwc->vbus_active = false;
2539 mdwc->id_state = DWC3_ID_FLOAT;
2540 }
2541
2542 dwc3_ext_event_notify(mdwc);
2543
2544 return count;
2545}
2546
2547static DEVICE_ATTR_RW(mode);
2548
2549static int dwc3_msm_probe(struct platform_device *pdev)
2550{
2551 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2552 struct device *dev = &pdev->dev;
2553 struct dwc3_msm *mdwc;
2554 struct dwc3 *dwc;
2555 struct resource *res;
2556 void __iomem *tcsr;
2557 bool host_mode;
2558 int ret = 0;
2559 int ext_hub_reset_gpio;
2560 u32 val;
2561
2562 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2563 if (!mdwc)
2564 return -ENOMEM;
2565
2566 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2567 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2568 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2569 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2570 return -EOPNOTSUPP;
2571 }
2572 }
2573
2574 platform_set_drvdata(pdev, mdwc);
2575 mdwc->dev = &pdev->dev;
2576
2577 INIT_LIST_HEAD(&mdwc->req_complete_list);
2578 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2579 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2580 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07002581 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002582 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
2583
2584 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2585 if (!mdwc->dwc3_wq) {
2586 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2587 return -ENOMEM;
2588 }
2589
2590 /* Get all clks and gdsc reference */
2591 ret = dwc3_msm_get_clk_gdsc(mdwc);
2592 if (ret) {
2593 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2594 return ret;
2595 }
2596
2597 mdwc->id_state = DWC3_ID_FLOAT;
2598 set_bit(ID, &mdwc->inputs);
2599
2600 mdwc->charging_disabled = of_property_read_bool(node,
2601 "qcom,charging-disabled");
2602
2603 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2604 &mdwc->lpm_to_suspend_delay);
2605 if (ret) {
2606 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2607 mdwc->lpm_to_suspend_delay = 0;
2608 }
2609
2610 /*
2611 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2612 * DP and DM linestate transitions during low power mode.
2613 */
2614 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2615 if (mdwc->hs_phy_irq < 0) {
2616 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2617 ret = -EINVAL;
2618 goto err;
2619 } else {
2620 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2621 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2622 msm_dwc3_pwr_irq,
2623 msm_dwc3_pwr_irq_thread,
2624 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2625 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2626 if (ret) {
2627 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2628 ret);
2629 goto err;
2630 }
2631 }
2632
2633 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
2634 if (mdwc->ss_phy_irq < 0) {
2635 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
2636 } else {
2637 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
2638 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
2639 msm_dwc3_pwr_irq,
2640 msm_dwc3_pwr_irq_thread,
2641 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2642 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
2643 if (ret) {
2644 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
2645 ret);
2646 goto err;
2647 }
2648 }
2649
2650 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
2651 if (mdwc->pwr_event_irq < 0) {
2652 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
2653 ret = -EINVAL;
2654 goto err;
2655 } else {
2656 /* will be enabled in dwc3_msm_resume() */
2657 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
2658 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
2659 msm_dwc3_pwr_irq,
2660 msm_dwc3_pwr_irq_thread,
2661 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
2662 "msm_dwc3", mdwc);
2663 if (ret) {
2664 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
2665 ret);
2666 goto err;
2667 }
2668 }
2669
2670 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
2671 if (!res) {
2672 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
2673 } else {
2674 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
2675 resource_size(res));
2676 if (IS_ERR_OR_NULL(tcsr)) {
2677 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
2678 } else {
2679 /* Enable USB3 on the primary USB port. */
2680 writel_relaxed(0x1, tcsr);
2681 /*
2682 * Ensure that TCSR write is completed before
2683 * USB registers initialization.
2684 */
2685 mb();
2686 }
2687 }
2688
2689 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
2690 if (!res) {
2691 dev_err(&pdev->dev, "missing memory base resource\n");
2692 ret = -ENODEV;
2693 goto err;
2694 }
2695
2696 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
2697 resource_size(res));
2698 if (!mdwc->base) {
2699 dev_err(&pdev->dev, "ioremap failed\n");
2700 ret = -ENODEV;
2701 goto err;
2702 }
2703
2704 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2705 "ahb2phy_base");
2706 if (res) {
2707 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
2708 res->start, resource_size(res));
2709 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
2710 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
2711 mdwc->ahb2phy_base = NULL;
2712 } else {
2713 /*
2714 * On some targets cfg_ahb_clk depends upon usb gdsc
2715 * regulator. If cfg_ahb_clk is enabled without
2716 * turning on usb gdsc regulator clk is stuck off.
2717 */
2718 dwc3_msm_config_gdsc(mdwc, 1);
2719 clk_prepare_enable(mdwc->cfg_ahb_clk);
2720 /* Configure AHB2PHY for one wait state read/write*/
2721 val = readl_relaxed(mdwc->ahb2phy_base +
2722 PERIPH_SS_AHB2PHY_TOP_CFG);
2723 if (val != ONE_READ_WRITE_WAIT) {
2724 writel_relaxed(ONE_READ_WRITE_WAIT,
2725 mdwc->ahb2phy_base +
2726 PERIPH_SS_AHB2PHY_TOP_CFG);
2727 /* complete above write before using USB PHY */
2728 mb();
2729 }
2730 clk_disable_unprepare(mdwc->cfg_ahb_clk);
2731 dwc3_msm_config_gdsc(mdwc, 0);
2732 }
2733 }
2734
2735 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
2736 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
2737 if (IS_ERR(mdwc->dbm)) {
2738 dev_err(&pdev->dev, "unable to get dbm device\n");
2739 ret = -EPROBE_DEFER;
2740 goto err;
2741 }
2742 /*
2743 * Add power event if the dbm indicates coming out of L1
2744 * by interrupt
2745 */
2746 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
2747 if (!mdwc->pwr_event_irq) {
2748 dev_err(&pdev->dev,
2749 "need pwr_event_irq exiting L1\n");
2750 ret = -EINVAL;
2751 goto err;
2752 }
2753 }
2754 }
2755
2756 ext_hub_reset_gpio = of_get_named_gpio(node,
2757 "qcom,ext-hub-reset-gpio", 0);
2758
2759 if (gpio_is_valid(ext_hub_reset_gpio)
2760 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
2761 "qcom,ext-hub-reset-gpio"))) {
2762 /* reset external hub */
2763 gpio_direction_output(ext_hub_reset_gpio, 1);
2764 /*
2765 * Hub reset should be asserted for minimum 5microsec
2766 * before deasserting.
2767 */
2768 usleep_range(5, 1000);
2769 gpio_direction_output(ext_hub_reset_gpio, 0);
2770 }
2771
2772 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
2773 &mdwc->tx_fifo_size))
2774 dev_err(&pdev->dev,
2775 "unable to read platform data tx fifo size\n");
2776
2777 mdwc->disable_host_mode_pm = of_property_read_bool(node,
2778 "qcom,disable-host-mode-pm");
2779
2780 dwc3_set_notifier(&dwc3_msm_notify_event);
2781
2782 /* Assumes dwc3 is the first DT child of dwc3-msm */
2783 dwc3_node = of_get_next_available_child(node, NULL);
2784 if (!dwc3_node) {
2785 dev_err(&pdev->dev, "failed to find dwc3 child\n");
2786 ret = -ENODEV;
2787 goto err;
2788 }
2789
2790 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2791 if (ret) {
2792 dev_err(&pdev->dev,
2793 "failed to add create dwc3 core\n");
2794 of_node_put(dwc3_node);
2795 goto err;
2796 }
2797
2798 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
2799 of_node_put(dwc3_node);
2800 if (!mdwc->dwc3) {
2801 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
2802 goto put_dwc3;
2803 }
2804
2805 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2806 "usb-phy", 0);
2807 if (IS_ERR(mdwc->hs_phy)) {
2808 dev_err(&pdev->dev, "unable to get hsphy device\n");
2809 ret = PTR_ERR(mdwc->hs_phy);
2810 goto put_dwc3;
2811 }
2812 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2813 "usb-phy", 1);
2814 if (IS_ERR(mdwc->ss_phy)) {
2815 dev_err(&pdev->dev, "unable to get ssphy device\n");
2816 ret = PTR_ERR(mdwc->ss_phy);
2817 goto put_dwc3;
2818 }
2819
2820 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
2821 if (mdwc->bus_scale_table) {
2822 mdwc->bus_perf_client =
2823 msm_bus_scale_register_client(mdwc->bus_scale_table);
2824 }
2825
2826 dwc = platform_get_drvdata(mdwc->dwc3);
2827 if (!dwc) {
2828 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
2829 goto put_dwc3;
2830 }
2831
2832 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
2833 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
2834
2835 if (cpu_to_affin)
2836 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
2837
2838 /*
2839 * Clocks and regulators will not be turned on until the first time
2840 * runtime PM resume is called. This is to allow for booting up with
2841 * charger already connected so as not to disturb PHY line states.
2842 */
2843 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
2844 atomic_set(&dwc->in_lpm, 1);
2845 pm_runtime_set_suspended(mdwc->dev);
2846 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
2847 pm_runtime_use_autosuspend(mdwc->dev);
2848 pm_runtime_enable(mdwc->dev);
2849 device_init_wakeup(mdwc->dev, 1);
2850
2851 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
2852 pm_runtime_get_noresume(mdwc->dev);
2853
2854 ret = dwc3_msm_extcon_register(mdwc);
2855 if (ret)
2856 goto put_dwc3;
2857
2858 /* Update initial VBUS/ID state from extcon */
2859 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
2860 EXTCON_USB))
2861 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
2862 if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
2863 EXTCON_USB_HOST))
2864 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
2865
2866 device_create_file(&pdev->dev, &dev_attr_mode);
2867
2868 schedule_delayed_work(&mdwc->sm_work, 0);
2869
2870 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
2871 if (!dwc->is_drd && host_mode) {
2872 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
2873 mdwc->id_state = DWC3_ID_GROUND;
2874 dwc3_ext_event_notify(mdwc);
2875 }
2876
2877 return 0;
2878
2879put_dwc3:
2880 platform_device_put(mdwc->dwc3);
2881 if (mdwc->bus_perf_client)
2882 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
2883err:
2884 return ret;
2885}
2886
/* device_for_each_child() callback: unregister each child device. */
static int dwc3_msm_remove_children(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}
2892
2893static int dwc3_msm_remove(struct platform_device *pdev)
2894{
2895 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
2896 int ret_pm;
2897
2898 device_remove_file(&pdev->dev, &dev_attr_mode);
2899
2900 if (cpu_to_affin)
2901 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
2902
2903 /*
2904 * In case of system suspend, pm_runtime_get_sync fails.
2905 * Hence turn ON the clocks manually.
2906 */
2907 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07002908 if (ret_pm < 0) {
2909 dev_err(mdwc->dev,
2910 "pm_runtime_get_sync failed with %d\n", ret_pm);
2911 clk_prepare_enable(mdwc->utmi_clk);
2912 clk_prepare_enable(mdwc->core_clk);
2913 clk_prepare_enable(mdwc->iface_clk);
2914 clk_prepare_enable(mdwc->sleep_clk);
2915 if (mdwc->bus_aggr_clk)
2916 clk_prepare_enable(mdwc->bus_aggr_clk);
2917 clk_prepare_enable(mdwc->xo_clk);
2918 }
2919
2920 cancel_delayed_work_sync(&mdwc->sm_work);
2921
2922 if (mdwc->hs_phy)
2923 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
2924 platform_device_put(mdwc->dwc3);
2925 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
2926
Mayank Rana511f3b22016-08-02 12:00:11 -07002927 pm_runtime_disable(mdwc->dev);
2928 pm_runtime_barrier(mdwc->dev);
2929 pm_runtime_put_sync(mdwc->dev);
2930 pm_runtime_set_suspended(mdwc->dev);
2931 device_wakeup_disable(mdwc->dev);
2932
2933 if (mdwc->bus_perf_client)
2934 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
2935
2936 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
2937 regulator_disable(mdwc->vbus_reg);
2938
2939 disable_irq(mdwc->hs_phy_irq);
2940 if (mdwc->ss_phy_irq)
2941 disable_irq(mdwc->ss_phy_irq);
2942 disable_irq(mdwc->pwr_event_irq);
2943
2944 clk_disable_unprepare(mdwc->utmi_clk);
2945 clk_set_rate(mdwc->core_clk, 19200000);
2946 clk_disable_unprepare(mdwc->core_clk);
2947 clk_disable_unprepare(mdwc->iface_clk);
2948 clk_disable_unprepare(mdwc->sleep_clk);
2949 clk_disable_unprepare(mdwc->xo_clk);
2950 clk_put(mdwc->xo_clk);
2951
2952 dwc3_msm_config_gdsc(mdwc, 0);
2953
2954 return 0;
2955}
2956
2957#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
2958
2959/**
2960 * dwc3_otg_start_host - helper function for starting/stoping the host
2961 * controller driver.
2962 *
2963 * @mdwc: Pointer to the dwc3_msm structure.
2964 * @on: start / stop the host controller driver.
2965 *
2966 * Returns 0 on success otherwise negative errno.
2967 */
static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	int ret = 0;

	/* Cannot host without an xHCI platform device to add/remove */
	if (!dwc->xhci)
		return -EINVAL;

	/*
	 * The vbus_reg pointer could have multiple values
	 * NULL: regulator_get() hasn't been called, or was previously deferred
	 * IS_ERR: regulator could not be obtained, so skip using it
	 * Valid pointer otherwise
	 */
	if (!mdwc->vbus_reg) {
		mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
				"vbus_dwc3");
		if (IS_ERR(mdwc->vbus_reg) &&
				PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
			/* regulators may not be ready, so retry again later */
			mdwc->vbus_reg = NULL;
			return -EPROBE_DEFER;
		}
	}

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);

		/* Keep the controller out of LPM while bringing up host mode */
		pm_runtime_get_sync(mdwc->dev);
		/* Mark both PHYs as host-mode before the connect notification */
		mdwc->hs_phy->flags |= PHY_HOST_MODE;
		mdwc->ss_phy->flags |= PHY_HOST_MODE;
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		/* Drive VBUS to the port only if a regulator was obtained */
		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_enable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to enable vbus_reg\n");
			/* Roll back PHY flags and the runtime-PM reference */
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			return ret;
		}

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);

		/*
		 * FIXME If micro A cable is disconnected during system suspend,
		 * xhci platform device will be removed before runtime pm is
		 * enabled for xhci device. Due to this, disable_depth becomes
		 * greater than one and runtimepm is not enabled for next microA
		 * connect. Fix this by calling pm_runtime_init for xhci device.
		 */
		pm_runtime_init(&dwc->xhci->dev);
		ret = platform_device_add(dwc->xhci);
		if (ret) {
			dev_err(mdwc->dev,
				"%s: failed to add XHCI pdev ret=%d\n",
				__func__, ret);
			/* Undo VBUS, PHY flags and PM reference on failure */
			if (!IS_ERR(mdwc->vbus_reg))
				regulator_disable(mdwc->vbus_reg);
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			return ret;
		}

		/*
		 * In some cases it is observed that USB PHY is not going into
		 * suspend with host mode suspend functionality. Hence disable
		 * XHCI's runtime PM here if disable_host_mode_pm is set.
		 */
		if (mdwc->disable_host_mode_pm)
			pm_runtime_disable(&dwc->xhci->dev);

		mdwc->in_host_mode = true;
		dwc3_usb3_phy_suspend(dwc, true);

		/* xHCI should have incremented child count as necessary */
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
	} else {
		dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);

		/* Stop driving VBUS before tearing the host stack down */
		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_disable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to disable vbus_reg\n");
			return ret;
		}

		/* Resume the controller for the teardown sequence below */
		pm_runtime_get_sync(mdwc->dev);
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
		mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
		platform_device_del(dwc->xhci);

		/*
		 * Perform USB hardware RESET (both core reset and DBM reset)
		 * when moving from host to peripheral. This is required for
		 * peripheral mode to work.
		 */
		dwc3_msm_block_reset(mdwc, true);

		dwc3_usb3_phy_suspend(dwc, false);
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);

		mdwc->in_host_mode = false;

		/* re-init core and OTG registers as block reset clears these */
		dwc3_post_host_reset_core_init(dwc);
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
	}

	return 0;
}
3083
3084static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3085{
3086 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3087
3088 /* Update OTG VBUS Valid from HSPHY to controller */
3089 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3090 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3091 UTMI_OTG_VBUS_VALID,
3092 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3093
3094 /* Update only if Super Speed is supported */
3095 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3096 /* Update VBUS Valid from SSPHY to controller */
3097 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3098 LANE0_PWR_PRESENT,
3099 vbus_present ? LANE0_PWR_PRESENT : 0);
3100 }
3101}
3102
3103/**
3104 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3105 *
3106 * @mdwc: Pointer to the dwc3_msm structure.
3107 * @on: Turn ON/OFF the gadget.
3108 *
3109 * Returns 0 on success otherwise negative errno.
3110 */
3111static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3112{
3113 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3114
3115 pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003116
3117 if (on) {
3118 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3119 __func__, dwc->gadget.name);
3120
3121 dwc3_override_vbus_status(mdwc, true);
3122 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3123 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3124
3125 /*
3126 * Core reset is not required during start peripheral. Only
3127 * DBM reset is required, hence perform only DBM reset here.
3128 */
3129 dwc3_msm_block_reset(mdwc, false);
3130
3131 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3132 usb_gadget_vbus_connect(&dwc->gadget);
3133 } else {
3134 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3135 __func__, dwc->gadget.name);
3136 usb_gadget_vbus_disconnect(&dwc->gadget);
3137 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3138 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3139 dwc3_override_vbus_status(mdwc, false);
3140 dwc3_usb3_phy_suspend(dwc, false);
3141 }
3142
3143 pm_runtime_put_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003144
3145 return 0;
3146}
3147
3148static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3149{
Jack Phamd72bafe2016-08-09 11:07:22 -07003150 union power_supply_propval pval = {1000 * mA};
3151 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07003152
3153 if (mdwc->charging_disabled)
3154 return 0;
3155
3156 if (mdwc->max_power == mA)
3157 return 0;
3158
3159 if (!mdwc->usb_psy) {
3160 mdwc->usb_psy = power_supply_get_by_name("usb");
3161 if (!mdwc->usb_psy) {
3162 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3163 return -ENODEV;
3164 }
3165 }
3166
3167 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3168
Mayank Rana511f3b22016-08-02 12:00:11 -07003169 /* Set max current limit in uA */
Jack Phamd72bafe2016-08-09 11:07:22 -07003170 ret = power_supply_set_property(mdwc->usb_psy,
3171 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3172 if (ret) {
3173 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3174 return ret;
3175 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003176
3177 mdwc->max_power = mA;
3178 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003179}
3180
3181
3182/**
3183 * dwc3_otg_sm_work - workqueue function.
3184 *
3185 * @w: Pointer to the dwc3 otg workqueue
3186 *
 * NOTE: After any change in otg_state, we must reschedule the state machine.
3188 */
3189static void dwc3_otg_sm_work(struct work_struct *w)
3190{
3191 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3192 struct dwc3 *dwc = NULL;
3193 bool work = 0;
3194 int ret = 0;
3195 unsigned long delay = 0;
3196 const char *state;
3197
3198 if (mdwc->dwc3)
3199 dwc = platform_get_drvdata(mdwc->dwc3);
3200
3201 if (!dwc) {
3202 dev_err(mdwc->dev, "dwc is NULL.\n");
3203 return;
3204 }
3205
3206 state = usb_otg_state_string(mdwc->otg_state);
3207 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana511f3b22016-08-02 12:00:11 -07003208
3209 /* Check OTG state */
3210 switch (mdwc->otg_state) {
3211 case OTG_STATE_UNDEFINED:
3212 /* Do nothing if no cable connected */
3213 if (test_bit(ID, &mdwc->inputs) &&
3214 !test_bit(B_SESS_VLD, &mdwc->inputs))
3215 break;
3216
Mayank Rana511f3b22016-08-02 12:00:11 -07003217 mdwc->otg_state = OTG_STATE_B_IDLE;
3218 /* fall-through */
3219 case OTG_STATE_B_IDLE:
3220 if (!test_bit(ID, &mdwc->inputs)) {
3221 dev_dbg(mdwc->dev, "!id\n");
3222 mdwc->otg_state = OTG_STATE_A_IDLE;
3223 work = 1;
3224 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
3225 dev_dbg(mdwc->dev, "b_sess_vld\n");
3226 /*
3227 * Increment pm usage count upon cable connect. Count
3228 * is decremented in OTG_STATE_B_PERIPHERAL state on
3229 * cable disconnect or in bus suspend.
3230 */
3231 pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003232 dwc3_otg_start_peripheral(mdwc, 1);
3233 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3234 work = 1;
3235 } else {
3236 dwc3_msm_gadget_vbus_draw(mdwc, 0);
3237 dev_dbg(mdwc->dev, "Cable disconnected\n");
3238 }
3239 break;
3240
3241 case OTG_STATE_B_PERIPHERAL:
3242 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
3243 !test_bit(ID, &mdwc->inputs)) {
3244 dev_dbg(mdwc->dev, "!id || !bsv\n");
3245 mdwc->otg_state = OTG_STATE_B_IDLE;
3246 dwc3_otg_start_peripheral(mdwc, 0);
3247 /*
3248 * Decrement pm usage count upon cable disconnect
3249 * which was incremented upon cable connect in
3250 * OTG_STATE_B_IDLE state
3251 */
3252 pm_runtime_put_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003253 work = 1;
3254 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
3255 test_bit(B_SESS_VLD, &mdwc->inputs)) {
3256 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
3257 mdwc->otg_state = OTG_STATE_B_SUSPEND;
3258 /*
3259 * Decrement pm usage count upon bus suspend.
3260 * Count was incremented either upon cable
3261 * connect in OTG_STATE_B_IDLE or host
3262 * initiated resume after bus suspend in
3263 * OTG_STATE_B_SUSPEND state
3264 */
3265 pm_runtime_mark_last_busy(mdwc->dev);
3266 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003267 }
3268 break;
3269
3270 case OTG_STATE_B_SUSPEND:
3271 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
3272 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
3273 mdwc->otg_state = OTG_STATE_B_IDLE;
3274 dwc3_otg_start_peripheral(mdwc, 0);
3275 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
3276 dev_dbg(mdwc->dev, "BSUSP !susp\n");
3277 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3278 /*
3279 * Increment pm usage count upon host
3280 * initiated resume. Count was decremented
3281 * upon bus suspend in
3282 * OTG_STATE_B_PERIPHERAL state.
3283 */
3284 pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003285 }
3286 break;
3287
3288 case OTG_STATE_A_IDLE:
3289 /* Switch to A-Device*/
3290 if (test_bit(ID, &mdwc->inputs)) {
3291 dev_dbg(mdwc->dev, "id\n");
3292 mdwc->otg_state = OTG_STATE_B_IDLE;
3293 mdwc->vbus_retry_count = 0;
3294 work = 1;
3295 } else {
3296 mdwc->otg_state = OTG_STATE_A_HOST;
3297 ret = dwc3_otg_start_host(mdwc, 1);
3298 if ((ret == -EPROBE_DEFER) &&
3299 mdwc->vbus_retry_count < 3) {
3300 /*
3301 * Get regulator failed as regulator driver is
3302 * not up yet. Will try to start host after 1sec
3303 */
3304 mdwc->otg_state = OTG_STATE_A_IDLE;
3305 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
3306 delay = VBUS_REG_CHECK_DELAY;
3307 work = 1;
3308 mdwc->vbus_retry_count++;
3309 } else if (ret) {
3310 dev_err(mdwc->dev, "unable to start host\n");
3311 mdwc->otg_state = OTG_STATE_A_IDLE;
3312 goto ret;
3313 }
3314 }
3315 break;
3316
3317 case OTG_STATE_A_HOST:
3318 if (test_bit(ID, &mdwc->inputs)) {
3319 dev_dbg(mdwc->dev, "id\n");
3320 dwc3_otg_start_host(mdwc, 0);
3321 mdwc->otg_state = OTG_STATE_B_IDLE;
3322 mdwc->vbus_retry_count = 0;
3323 work = 1;
3324 } else {
3325 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003326 if (dwc)
3327 pm_runtime_resume(&dwc->xhci->dev);
3328 }
3329 break;
3330
3331 default:
3332 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
3333
3334 }
3335
3336 if (work)
3337 schedule_delayed_work(&mdwc->sm_work, delay);
3338
3339ret:
3340 return;
3341}
3342
3343#ifdef CONFIG_PM_SLEEP
3344static int dwc3_msm_pm_suspend(struct device *dev)
3345{
3346 int ret = 0;
3347 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3348 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3349
3350 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003351
3352 flush_workqueue(mdwc->dwc3_wq);
3353 if (!atomic_read(&dwc->in_lpm)) {
3354 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
3355 return -EBUSY;
3356 }
3357
3358 ret = dwc3_msm_suspend(mdwc);
3359 if (!ret)
3360 atomic_set(&mdwc->pm_suspended, 1);
3361
3362 return ret;
3363}
3364
3365static int dwc3_msm_pm_resume(struct device *dev)
3366{
3367 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3368
3369 dev_dbg(dev, "dwc3-msm PM resume\n");
3370
Mayank Rana511f3b22016-08-02 12:00:11 -07003371 /* flush to avoid race in read/write of pm_suspended */
3372 flush_workqueue(mdwc->dwc3_wq);
3373 atomic_set(&mdwc->pm_suspended, 0);
3374
3375 /* kick in otg state machine */
3376 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
3377
3378 return 0;
3379}
3380#endif
3381
3382#ifdef CONFIG_PM
/* Runtime-PM idle callback: returning 0 allows suspend to proceed */
static int dwc3_msm_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "DWC3-msm runtime idle\n");

	return 0;
}
3389
/* Runtime-PM suspend callback: put the glue/controller into low power */
static int dwc3_msm_runtime_suspend(struct device *dev)
{
	struct dwc3_msm *msm = dev_get_drvdata(dev);

	dev_dbg(dev, "DWC3-msm runtime suspend\n");

	return dwc3_msm_suspend(msm);
}
3398
/* Runtime-PM resume callback: bring the glue/controller out of low power */
static int dwc3_msm_runtime_resume(struct device *dev)
{
	struct dwc3_msm *msm = dev_get_drvdata(dev);

	dev_dbg(dev, "DWC3-msm runtime resume\n");

	return dwc3_msm_resume(msm);
}
3407#endif
3408
/* System-sleep and runtime PM hooks for the dwc3-msm glue device */
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
				dwc3_msm_runtime_idle)
};
3414
3415static const struct of_device_id of_dwc3_matach[] = {
3416 {
3417 .compatible = "qcom,dwc-usb3-msm",
3418 },
3419 { },
3420};
3421MODULE_DEVICE_TABLE(of, of_dwc3_matach);
3422
3423static struct platform_driver dwc3_msm_driver = {
3424 .probe = dwc3_msm_probe,
3425 .remove = dwc3_msm_remove,
3426 .driver = {
3427 .name = "msm-dwc3",
3428 .pm = &dwc3_msm_dev_pm_ops,
3429 .of_match_table = of_dwc3_matach,
3430 },
3431};
3432
3433MODULE_LICENSE("GPL v2");
3434MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
3435
3436static int dwc3_msm_init(void)
3437{
3438 return platform_driver_register(&dwc3_msm_driver);
3439}
3440module_init(dwc3_msm_init);
3441
/* Module exit point: unregister the platform driver */
static void __exit dwc3_msm_exit(void)
{
	platform_driver_unregister(&dwc3_msm_driver);
}
module_exit(dwc3_msm_exit);