1/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/delay.h>
30#include <linux/of.h>
31#include <linux/of_platform.h>
32#include <linux/of_gpio.h>
33#include <linux/list.h>
34#include <linux/uaccess.h>
35#include <linux/usb/ch9.h>
36#include <linux/usb/gadget.h>
37#include <linux/usb/of.h>
38#include <linux/regulator/consumer.h>
39#include <linux/pm_wakeup.h>
40#include <linux/power_supply.h>
41#include <linux/cdev.h>
42#include <linux/completion.h>
43#include <linux/msm-bus.h>
44#include <linux/irq.h>
45#include <linux/extcon.h>
46
47#include "power.h"
48#include "core.h"
49#include "gadget.h"
50#include "dbm.h"
51#include "debug.h"
52#include "xhci.h"
53
54/* Timeout to wait for USB cable status notification (in ms) */
55#define SM_INIT_TIMEOUT 30000
56
57/* AHB2PHY register offsets */
58#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
59
60/* AHB2PHY read/write wait value */
61#define ONE_READ_WRITE_WAIT 0x11
62
63/* cpu to which the usb interrupt is affined */
64static int cpu_to_affin;
65module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
66MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
67
68/* XHCI registers */
69#define USB3_HCSPARAMS1 (0x4)
70#define USB3_PORTSC (0x420)
71
72/**
73 * USB QSCRATCH Hardware registers
74 *
75 */
76#define QSCRATCH_REG_OFFSET (0x000F8800)
77#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
78#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
79#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
80#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
81
82#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
83#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
84#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
85#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
86#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
87
88/* QSCRATCH_GENERAL_CFG register bit offset */
89#define PIPE_UTMI_CLK_SEL BIT(0)
90#define PIPE3_PHYSTATUS_SW BIT(3)
91#define PIPE_UTMI_CLK_DIS BIT(8)
92
93#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
94#define UTMI_OTG_VBUS_VALID BIT(20)
95#define SW_SESSVLD_SEL BIT(28)
96
97#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
98#define LANE0_PWR_PRESENT BIT(24)
99
100/* GSI related registers */
101#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
102#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
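/*
 * Note: the same two TRB address bits are also written directly as
 * (1 << 23) | (1 << 21) when the link TRB bph field is built in
 * gsi_prepare_trbs() below.
 */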
103
104#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
105#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
106#define GSI_CLK_EN_MASK BIT(12)
107#define BLOCK_GSI_WR_GO_MASK BIT(1)
108#define GSI_EN_MASK BIT(0)
109
110#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
111#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
112#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
113#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
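/*
 * In the four macros above, n is the GSI interrupter index
 * (ep->ep_intr_num - 1), as used by gsi_store_ringbase_dbl_info() below.
 */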
114
115#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
116#define GSI_WR_CTRL_STATE_MASK BIT(15)
117
118struct dwc3_msm_req_complete {
119 struct list_head list_item;
120 struct usb_request *req;
121 void (*orig_complete)(struct usb_ep *ep,
122 struct usb_request *req);
123};
124
125enum dwc3_id_state {
126 DWC3_ID_GROUND = 0,
127 DWC3_ID_FLOAT,
128};
129
130/* for type c cable */
131enum plug_orientation {
132 ORIENTATION_NONE,
133 ORIENTATION_CC1,
134 ORIENTATION_CC2,
135};
136
137/* Input bits to state machine (mdwc->inputs) */
138
139#define ID 0
140#define B_SESS_VLD 1
141#define B_SUSPEND 2
142
143struct dwc3_msm {
144 struct device *dev;
145 void __iomem *base;
146 void __iomem *ahb2phy_base;
147 struct platform_device *dwc3;
148 const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
149 struct list_head req_complete_list;
150 struct clk *xo_clk;
151 struct clk *core_clk;
152 long core_clk_rate;
153 struct clk *iface_clk;
154 struct clk *sleep_clk;
155 struct clk *utmi_clk;
156 unsigned int utmi_clk_rate;
157 struct clk *utmi_clk_src;
158 struct clk *bus_aggr_clk;
159 struct clk *cfg_ahb_clk;
160 struct regulator *dwc3_gdsc;
161
162 struct usb_phy *hs_phy, *ss_phy;
163
164 struct dbm *dbm;
165
166 /* VBUS regulator for host mode */
167 struct regulator *vbus_reg;
168 int vbus_retry_count;
169 bool resume_pending;
170 atomic_t pm_suspended;
171 int hs_phy_irq;
172 int ss_phy_irq;
173 struct work_struct resume_work;
174 struct work_struct restart_usb_work;
175 bool in_restart;
176 struct workqueue_struct *dwc3_wq;
177 struct delayed_work sm_work;
178 unsigned long inputs;
179 unsigned int max_power;
180 bool charging_disabled;
181 enum usb_otg_state otg_state;
182	struct work_struct bus_vote_w;
183 unsigned int bus_vote;
184 u32 bus_perf_client;
185 struct msm_bus_scale_pdata *bus_scale_table;
186 struct power_supply *usb_psy;
187 bool in_host_mode;
188 unsigned int tx_fifo_size;
189 bool vbus_active;
190 bool suspend;
191 bool disable_host_mode_pm;
192 enum dwc3_id_state id_state;
193 unsigned long lpm_flags;
194#define MDWC3_SS_PHY_SUSPEND BIT(0)
195#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
196#define MDWC3_POWER_COLLAPSE BIT(2)
197
198 unsigned int irq_to_affin;
199 struct notifier_block dwc3_cpu_notifier;
200
201 struct extcon_dev *extcon_vbus;
202 struct extcon_dev *extcon_id;
203 struct notifier_block vbus_nb;
204 struct notifier_block id_nb;
205
206 int pwr_event_irq;
207 atomic_t in_p3;
208 unsigned int lpm_to_suspend_delay;
209 bool init;
210 enum plug_orientation typec_orientation;
211};
212
213#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
214#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
215#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
216
217#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
218#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
219#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
220
221#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
222#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
223#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
224
225#define DSTS_CONNECTSPD_SS 0x4
226
227
228static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
229static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
230
231/**
232 *
233 * Read register with debug info.
234 *
235 * @base - DWC3 base virtual address.
236 * @offset - register offset.
237 *
238 * @return u32
239 */
240static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
241{
242 u32 val = ioread32(base + offset);
243 return val;
244}
245
246/**
247 * Read register masked field with debug info.
248 *
249 * @base - DWC3 base virtual address.
250 * @offset - register offset.
251 * @mask - register bitmask.
252 *
253 * @return u32
254 */
255static inline u32 dwc3_msm_read_reg_field(void *base,
256 u32 offset,
257 const u32 mask)
258{
259 u32 shift = find_first_bit((void *)&mask, 32);
260 u32 val = ioread32(base + offset);
261
262 val &= mask; /* clear other bits */
263 val >>= shift;
264 return val;
265}
266
267/**
268 *
269 * Write register with debug info.
270 *
271 * @base - DWC3 base virtual address.
272 * @offset - register offset.
273 * @val - value to write.
274 *
275 */
276static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
277{
278 iowrite32(val, base + offset);
279}
280
281/**
282 * Write register masked field with debug info.
283 *
284 * @base - DWC3 base virtual address.
285 * @offset - register offset.
286 * @mask - register bitmask.
287 * @val - value to write.
288 *
289 */
290static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
291 const u32 mask, u32 val)
292{
293 u32 shift = find_first_bit((void *)&mask, 32);
294 u32 tmp = ioread32(base + offset);
295
296 tmp &= ~mask; /* clear written bits */
297 val = tmp | (val << shift);
298 iowrite32(val, base + offset);
299}
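/*
 * Note: because the shift is derived from the mask's lowest set bit,
 * callers pass the field value unshifted; for example, gsi_enable()
 * below uses dwc3_msm_write_reg_field(mdwc->base, GSI_GENERAL_CFG_REG,
 * GSI_EN_MASK, 1) to set the single-bit enable field.
 */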
300
301/**
302 * Write register and read back masked value to confirm it is written
303 *
304 * @base - DWC3 base virtual address.
305 * @offset - register offset.
306 * @mask - register bitmask specifying what should be updated
307 * @val - value to write.
308 *
309 */
310static inline void dwc3_msm_write_readback(void *base, u32 offset,
311 const u32 mask, u32 val)
312{
313 u32 write_val, tmp = ioread32(base + offset);
314
315 tmp &= ~mask; /* retain other bits */
316 write_val = tmp | val;
317
318 iowrite32(write_val, base + offset);
319
320 /* Read back to see if val was written */
321 tmp = ioread32(base + offset);
322 tmp &= mask; /* clear other bits */
323
324 if (tmp != val)
325 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
326 __func__, val, offset);
327}
328
329static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
330{
331 int i, num_ports;
332 u32 reg;
333
334 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
335 num_ports = HCS_MAX_PORTS(reg);
336
337 for (i = 0; i < num_ports; i++) {
338 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
339 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
340 return true;
341 }
342
343 return false;
344}
345
346static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
347{
348 u8 speed;
349
350 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
351 return !!(speed & DSTS_CONNECTSPD_SS);
352}
353
354static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
355{
356 if (mdwc->in_host_mode)
357 return dwc3_msm_is_host_superspeed(mdwc);
358
359 return dwc3_msm_is_dev_superspeed(mdwc);
360}
361
362#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
363/**
364 * Configure the DBM with the BAM's data fifo.
365 * This function is called by the USB BAM Driver
366 * upon initialization.
367 *
368 * @ep - pointer to usb endpoint.
369 * @addr - address of data fifo.
370 * @size - size of data fifo.
371 *
372 */
373int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
374 u32 size, u8 dst_pipe_idx)
375{
376 struct dwc3_ep *dep = to_dwc3_ep(ep);
377 struct dwc3 *dwc = dep->dwc;
378 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
379
380 dev_dbg(mdwc->dev, "%s\n", __func__);
381
382 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
383 dst_pipe_idx);
384}
385
386
387/**
388* Cleanups for msm endpoint on request complete.
389*
390* Also call original request complete.
391*
392* @usb_ep - pointer to usb_ep instance.
393* @request - pointer to usb_request instance.
394*
395* @return int - 0 on success, negative on error.
396*/
397static void dwc3_msm_req_complete_func(struct usb_ep *ep,
398 struct usb_request *request)
399{
400 struct dwc3_ep *dep = to_dwc3_ep(ep);
401 struct dwc3 *dwc = dep->dwc;
402 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
403 struct dwc3_msm_req_complete *req_complete = NULL;
404
405 /* Find original request complete function and remove it from list */
406 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
407 if (req_complete->req == request)
408 break;
409 }
410 if (!req_complete || req_complete->req != request) {
411 dev_err(dep->dwc->dev, "%s: could not find the request\n",
412 __func__);
413 return;
414 }
415 list_del(&req_complete->list_item);
416
417 /*
418	 * Release one more TRB to the pool since the DBM queue took 2 TRBs
419 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
420 * released only one.
421 */
422	dep->trb_dequeue++;
423
424 /* Unconfigure dbm ep */
425 dbm_ep_unconfig(mdwc->dbm, dep->number);
426
427 /*
428	 * If this is the last endpoint we unconfigured, then also reset
429 * the event buffers; unless unconfiguring the ep due to lpm,
430 * in which case the event buffer only gets reset during the
431 * block reset.
432 */
433 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
434 !dbm_reset_ep_after_lpm(mdwc->dbm))
435 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
436
437 /*
438 * Call original complete function, notice that dwc->lock is already
439 * taken by the caller of this function (dwc3_gadget_giveback()).
440 */
441 request->complete = req_complete->orig_complete;
442 if (request->complete)
443 request->complete(ep, request);
444
445 kfree(req_complete);
446}
447
448
449/**
450* Helper function
451*
452* Reset DBM endpoint.
453*
454* @mdwc - pointer to dwc3_msm instance.
455* @dep - pointer to dwc3_ep instance.
456*
457* @return int - 0 on success, negative on error.
458*/
459static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
460{
461 int ret;
462
463 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
464
465 /* Reset the dbm endpoint */
466 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
467 if (ret) {
468 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
469 __func__);
470 return ret;
471 }
472
473 /*
474 * The necessary delay between asserting and deasserting the dbm ep
475 * reset is based on the number of active endpoints. If there is more
476 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
477 * delay will suffice.
478 */
479 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
480 usleep_range(1000, 1200);
481 else
482 udelay(10);
483 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
484 if (ret) {
485 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
486 __func__);
487 return ret;
488 }
489
490 return 0;
491}
492
493/**
494* Reset the DBM endpoint which is linked to the given USB endpoint.
495*
496* @usb_ep - pointer to usb_ep instance.
497*
498* @return int - 0 on success, negative on error.
499*/
500
501int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
502{
503 struct dwc3_ep *dep = to_dwc3_ep(ep);
504 struct dwc3 *dwc = dep->dwc;
505 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
506
507 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
508}
509EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
510
511
512/**
513* Helper function.
514* See the header of the dwc3_msm_ep_queue function.
515*
516* @dwc3_ep - pointer to dwc3_ep instance.
517* @req - pointer to dwc3_request instance.
518*
519* @return int - 0 on success, negative on error.
520*/
521static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
522{
523 struct dwc3_trb *trb;
524 struct dwc3_trb *trb_link;
525 struct dwc3_gadget_ep_cmd_params params;
526 u32 cmd;
527 int ret = 0;
528
529	/* We push the request to the dep->started_list to indicate that
530	 * this request is issued with start transfer. The request will be
531	 * removed from this list in two cases. The first is when the
532	 * transfer completes (not if the transfer is endless, using
533	 * circular TRBs with a link TRB). The second is a stop transfer
534	 * initiated by the function driver when calling dequeue.
535	 */
536	req->started = true;
537	list_add_tail(&req->list, &dep->started_list);
538
539 /* First, prepare a normal TRB, point to the fake buffer */
540	trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
541	dep->trb_enqueue++;
542	memset(trb, 0, sizeof(*trb));
543
544 req->trb = trb;
545 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
546 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
547 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
548 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
549 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
550
551	/* Second, prepare a Link TRB that points to the first TRB */
552	trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
553	dep->trb_enqueue++;
554	memset(trb_link, 0, sizeof(*trb_link));
555
556 trb_link->bpl = lower_32_bits(req->trb_dma);
557 trb_link->bph = DBM_TRB_BIT |
558 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
559 trb_link->size = 0;
560 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
561
562 /*
563 * Now start the transfer
564 */
565 memset(&params, 0, sizeof(params));
566 params.param0 = 0; /* TDAddr High */
567 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
568
569 /* DBM requires IOC to be set */
570 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
571	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
572	if (ret < 0) {
573 dev_dbg(dep->dwc->dev,
574 "%s: failed to send STARTTRANSFER command\n",
575 __func__);
576
577 list_del(&req->list);
578 return ret;
579 }
580 dep->flags |= DWC3_EP_BUSY;
581	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
582
583 return ret;
584}
585
586/**
587* Queue a usb request to the DBM endpoint.
588* This function should be called after the endpoint
589* was enabled by the ep_enable.
590*
591* This function prepares a special structure of TRBs which
592* is suited to the DBM HW, so it will be possible to use
593* this endpoint in DBM mode.
594*
595* The TRBs prepared by this function are one normal TRB
596* which points to a fake buffer, followed by a link TRB
597* that points to the first TRB.
598*
599* The API of this function follows the regular API of
600* usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
601*
602* @usb_ep - pointer to usb_ep instance.
603* @request - pointer to usb_request instance.
604* @gfp_flags - possible flags.
605*
606* @return int - 0 on success, negative on error.
607*/
608static int dwc3_msm_ep_queue(struct usb_ep *ep,
609 struct usb_request *request, gfp_t gfp_flags)
610{
611 struct dwc3_request *req = to_dwc3_request(request);
612 struct dwc3_ep *dep = to_dwc3_ep(ep);
613 struct dwc3 *dwc = dep->dwc;
614 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
615 struct dwc3_msm_req_complete *req_complete;
616 unsigned long flags;
617 int ret = 0, size;
618 u8 bam_pipe;
619 bool producer;
620 bool disable_wb;
621 bool internal_mem;
622 bool ioc;
623 bool superspeed;
624
625 if (!(request->udc_priv & MSM_SPS_MODE)) {
626 /* Not SPS mode, call original queue */
627 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
628 __func__);
629
630 return (mdwc->original_ep_ops[dep->number])->queue(ep,
631 request,
632 gfp_flags);
633 }
634
635 /* HW restriction regarding TRB size (8KB) */
636 if (req->request.length < 0x2000) {
637 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
638 return -EINVAL;
639 }
640
641 /*
642 * Override req->complete function, but before doing that,
643 * store it's original pointer in the req_complete_list.
644 */
645 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
646 if (!req_complete)
647 return -ENOMEM;
648
649 req_complete->req = request;
650 req_complete->orig_complete = request->complete;
651 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
652 request->complete = dwc3_msm_req_complete_func;
653
654 /*
655 * Configure the DBM endpoint
656 */
657 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
658 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
659 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
660 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
661 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
662
663 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
664 disable_wb, internal_mem, ioc);
665 if (ret < 0) {
666 dev_err(mdwc->dev,
667 "error %d after calling dbm_ep_config\n", ret);
668 return ret;
669 }
670
671	dev_vdbg(dwc->dev, "%s: queuing request %p to ep %s length %d\n",
672 __func__, request, ep->name, request->length);
673 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
674 dbm_event_buffer_config(mdwc->dbm,
675 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
676 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
677 DWC3_GEVNTSIZ_SIZE(size));
678
679 /*
680	 * We must acquire the lock of the dwc3 core driver,
681	 * including disabling interrupts, so we can be sure
682	 * that we are the only ones configuring the HW device
683	 * core, and we queue the request as quickly as possible
684	 * so that the lock can be released soon.
685 */
686 spin_lock_irqsave(&dwc->lock, flags);
687 if (!dep->endpoint.desc) {
688 dev_err(mdwc->dev,
689 "%s: trying to queue request %p to disabled ep %s\n",
690 __func__, request, ep->name);
691 ret = -EPERM;
692 goto err;
693 }
694
695 if (dep->number == 0 || dep->number == 1) {
696 dev_err(mdwc->dev,
697 "%s: trying to queue dbm request %p to control ep %s\n",
698 __func__, request, ep->name);
699 ret = -EPERM;
700 goto err;
701 }
702
703
704	if (dep->trb_dequeue != dep->trb_enqueue ||
705			!list_empty(&dep->pending_list)
706			|| !list_empty(&dep->started_list)) {
707		dev_err(mdwc->dev,
708			"%s: trying to queue dbm request %p to ep %s\n",
709 __func__, request, ep->name);
710 ret = -EPERM;
711 goto err;
712 } else {
713		dep->trb_dequeue = 0;
714		dep->trb_enqueue = 0;
715	}
716
717 ret = __dwc3_msm_ep_queue(dep, req);
718 if (ret < 0) {
719 dev_err(mdwc->dev,
720 "error %d after calling __dwc3_msm_ep_queue\n", ret);
721 goto err;
722 }
723
724 spin_unlock_irqrestore(&dwc->lock, flags);
725 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
726 dbm_set_speed(mdwc->dbm, (u8)superspeed);
727
728 return 0;
729
730err:
731 spin_unlock_irqrestore(&dwc->lock, flags);
732 kfree(req_complete);
733 return ret;
734}
735
736/*
737* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
738*
739* @usb_ep - pointer to usb_ep instance.
740*
741* @return int - XferRscIndex
742*/
743static inline int gsi_get_xfer_index(struct usb_ep *ep)
744{
745 struct dwc3_ep *dep = to_dwc3_ep(ep);
746
747 return dep->resource_index;
748}
749
750/*
751* Fills up the GSI channel information needed in call to IPA driver
752* for GSI channel creation.
753*
754* @usb_ep - pointer to usb_ep instance.
755* @ch_info - output parameter with requested channel info
756*/
757static void gsi_get_channel_info(struct usb_ep *ep,
758 struct gsi_channel_info *ch_info)
759{
760 struct dwc3_ep *dep = to_dwc3_ep(ep);
761 int last_trb_index = 0;
762 struct dwc3 *dwc = dep->dwc;
763 struct usb_gsi_request *request = ch_info->ch_req;
764
765 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
766 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
767		DWC3_DEPCMD);
768	ch_info->depcmd_hi_addr = 0;
769
770 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
771 &dep->trb_pool[0]);
772	/* Convert to multiples of 1KB */
773 ch_info->const_buffer_size = request->buf_len/1024;
774
775 /* IN direction */
776 if (dep->direction) {
777 /*
778 * Multiply by size of each TRB for xfer_ring_len in bytes.
779 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
780 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
781 */
782 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
783 last_trb_index = 2 * request->num_bufs + 2;
784 } else { /* OUT direction */
785 /*
786 * Multiply by size of each TRB for xfer_ring_len in bytes.
787 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
788 * LINK TRB.
789 */
790 ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
791 last_trb_index = request->num_bufs + 1;
792 }
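	/*
	 * For example, with num_bufs = 8 the IN ring is 18 TRBs
	 * (0x120 bytes) and the OUT ring is 9 TRBs (0x90 bytes).
	 */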
793
794 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
795 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
796 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
797 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
798 DWC3_GEVNTCOUNT(ep->ep_intr_num));
799 ch_info->gevntcount_hi_addr = 0;
800
801 dev_dbg(dwc->dev,
802 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
803 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
804 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
805}
806
807/*
808* Perform StartXfer on GSI EP. Stores XferRscIndex.
809*
810* @usb_ep - pointer to usb_ep instance.
811*
812* @return int - 0 on success
813*/
814static int gsi_startxfer_for_ep(struct usb_ep *ep)
815{
816 int ret;
817 struct dwc3_gadget_ep_cmd_params params;
818 u32 cmd;
819 struct dwc3_ep *dep = to_dwc3_ep(ep);
820 struct dwc3 *dwc = dep->dwc;
821
822 memset(&params, 0, sizeof(params));
823 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
824 params.param0 |= (ep->ep_intr_num << 16);
825 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
826 &dep->trb_pool[0]));
827 cmd = DWC3_DEPCMD_STARTTRANSFER;
828 cmd |= DWC3_DEPCMD_PARAM(0);
829	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
830
831 if (ret < 0)
832 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
833	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
834	dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
835 return ret;
836}
837
838/*
839* Store Ring Base and Doorbell Address for GSI EP
840* for GSI channel creation.
841*
842* @usb_ep - pointer to usb_ep instance.
843* @dbl_addr - Doorbell address obtained from IPA driver
844*/
845static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
846{
847 struct dwc3_ep *dep = to_dwc3_ep(ep);
848 struct dwc3 *dwc = dep->dwc;
849 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
850 int n = ep->ep_intr_num - 1;
851
852 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
853 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
854 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
855
856 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
857 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
858 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
859 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
860}
861
862/*
863* Rings Doorbell for IN GSI Channel
864*
865* @usb_ep - pointer to usb_ep instance.
866* @request - pointer to GSI request. This is used to pass in the
867* address of the GSI doorbell obtained from IPA driver
868*/
869static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
870{
871 void __iomem *gsi_dbl_address_lsb;
872 void __iomem *gsi_dbl_address_msb;
873 dma_addr_t offset;
874 u64 dbl_addr = *((u64 *)request->buf_base_addr);
875 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
876 u32 dbl_hi_addr = (dbl_addr >> 32);
877 u32 num_trbs = (request->num_bufs * 2 + 2);
878 struct dwc3_ep *dep = to_dwc3_ep(ep);
879 struct dwc3 *dwc = dep->dwc;
880 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
881
882 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
883 dbl_lo_addr, sizeof(u32));
884 if (!gsi_dbl_address_lsb)
885 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
886
887 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
888 dbl_hi_addr, sizeof(u32));
889 if (!gsi_dbl_address_msb)
890 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
891
892 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
893 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
894 &offset, gsi_dbl_address_lsb, dbl_lo_addr);
895
896 writel_relaxed(offset, gsi_dbl_address_lsb);
897 writel_relaxed(0, gsi_dbl_address_msb);
898}
899
900/*
901* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
902*
903* @usb_ep - pointer to usb_ep instance.
904* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
905*
906* @return int - 0 on success
907*/
908static int gsi_updatexfer_for_ep(struct usb_ep *ep,
909 struct usb_gsi_request *request)
910{
911 int i;
912 int ret;
913 u32 cmd;
914 int num_trbs = request->num_bufs + 1;
915 struct dwc3_trb *trb;
916 struct dwc3_gadget_ep_cmd_params params;
917 struct dwc3_ep *dep = to_dwc3_ep(ep);
918 struct dwc3 *dwc = dep->dwc;
919
920 for (i = 0; i < num_trbs - 1; i++) {
921 trb = &dep->trb_pool[i];
922 trb->ctrl |= DWC3_TRB_CTRL_HWO;
923 }
924
925 memset(&params, 0, sizeof(params));
926 cmd = DWC3_DEPCMD_UPDATETRANSFER;
927 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
928	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
929	dep->flags |= DWC3_EP_BUSY;
930 if (ret < 0)
931 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
932 return ret;
933}
934
935/*
936* Perform EndXfer on particular GSI EP.
937*
938* @usb_ep - pointer to usb_ep instance.
939*/
940static void gsi_endxfer_for_ep(struct usb_ep *ep)
941{
942 struct dwc3_ep *dep = to_dwc3_ep(ep);
943 struct dwc3 *dwc = dep->dwc;
944
945 dwc3_stop_active_transfer(dwc, dep->number, true);
946}
947
948/*
949* Allocates and configures TRBs for GSI EPs.
950*
951* @usb_ep - pointer to usb_ep instance.
952* @request - pointer to GSI request.
953*
954* @return int - 0 on success
955*/
956static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
957{
958 int i = 0;
959 dma_addr_t buffer_addr = req->dma;
960 struct dwc3_ep *dep = to_dwc3_ep(ep);
961 struct dwc3 *dwc = dep->dwc;
962 struct dwc3_trb *trb;
963 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
964 : (req->num_bufs + 1);
965
966 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
967 num_trbs * sizeof(struct dwc3_trb),
968 num_trbs * sizeof(struct dwc3_trb), 0);
969 if (!dep->trb_dma_pool) {
970 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
971 dep->name);
972 return -ENOMEM;
973 }
974
975 dep->num_trbs = num_trbs;
976
977 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
978 GFP_KERNEL, &dep->trb_pool_dma);
979 if (!dep->trb_pool) {
980 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
981 dep->name);
982 return -ENOMEM;
983 }
984
985 /* IN direction */
986 if (dep->direction) {
987 for (i = 0; i < num_trbs ; i++) {
988 trb = &dep->trb_pool[i];
989 memset(trb, 0, sizeof(*trb));
990 /* Set up first n+1 TRBs for ZLPs */
991 if (i < (req->num_bufs + 1)) {
992 trb->bpl = 0;
993 trb->bph = 0;
994 trb->size = 0;
995 trb->ctrl = DWC3_TRBCTL_NORMAL
996 | DWC3_TRB_CTRL_IOC;
997 continue;
998 }
999
1000 /* Setup n TRBs pointing to valid buffers */
1001 trb->bpl = lower_32_bits(buffer_addr);
1002 trb->bph = 0;
1003 trb->size = 0;
1004 trb->ctrl = DWC3_TRBCTL_NORMAL
1005 | DWC3_TRB_CTRL_IOC;
1006 buffer_addr += req->buf_len;
1007
1008 /* Set up the Link TRB at the end */
1009 if (i == (num_trbs - 1)) {
1010 trb->bpl = dwc3_trb_dma_offset(dep,
1011 &dep->trb_pool[0]);
1012 trb->bph = (1 << 23) | (1 << 21)
1013 | (ep->ep_intr_num << 16);
1014 trb->size = 0;
1015 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1016 | DWC3_TRB_CTRL_HWO;
1017 }
1018 }
1019 } else { /* OUT direction */
1020
1021 for (i = 0; i < num_trbs ; i++) {
1022
1023 trb = &dep->trb_pool[i];
1024 memset(trb, 0, sizeof(*trb));
1025 trb->bpl = lower_32_bits(buffer_addr);
1026 trb->bph = 0;
1027 trb->size = req->buf_len;
1028 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
1029 | DWC3_TRB_CTRL_CSP
1030 | DWC3_TRB_CTRL_ISP_IMI;
1031 buffer_addr += req->buf_len;
1032
1033 /* Set up the Link TRB at the end */
1034 if (i == (num_trbs - 1)) {
1035 trb->bpl = dwc3_trb_dma_offset(dep,
1036 &dep->trb_pool[0]);
1037 trb->bph = (1 << 23) | (1 << 21)
1038 | (ep->ep_intr_num << 16);
1039 trb->size = 0;
1040 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1041 | DWC3_TRB_CTRL_HWO;
1042 }
1043 }
1044 }
1045 return 0;
1046}
1047
1048/*
1049* Frees TRBs for GSI EPs.
1050*
1051* @usb_ep - pointer to usb_ep instance.
1052*
1053*/
1054static void gsi_free_trbs(struct usb_ep *ep)
1055{
1056 struct dwc3_ep *dep = to_dwc3_ep(ep);
1057
1058 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1059 return;
1060
1061 /* Free TRBs and TRB pool for EP */
1062 if (dep->trb_dma_pool) {
1063 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1064 dep->trb_pool_dma);
1065 dma_pool_destroy(dep->trb_dma_pool);
1066 dep->trb_pool = NULL;
1067 dep->trb_pool_dma = 0;
1068 dep->trb_dma_pool = NULL;
1069 }
1070}
1071/*
1072* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1073*
1074* @usb_ep - pointer to usb_ep instance.
1075* @request - pointer to GSI request.
1076*/
1077static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1078{
1079 struct dwc3_ep *dep = to_dwc3_ep(ep);
1080 struct dwc3 *dwc = dep->dwc;
1081 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1082 struct dwc3_gadget_ep_cmd_params params;
1083 const struct usb_endpoint_descriptor *desc = ep->desc;
1084 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
1085 u32 reg;
1086
1087 memset(&params, 0x00, sizeof(params));
1088
1089 /* Configure GSI EP */
1090 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1091 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1092
1093 /* Burst size is only needed in SuperSpeed mode */
1094 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1095 u32 burst = dep->endpoint.maxburst - 1;
1096
1097 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1098 }
1099
1100 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1101 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1102 | DWC3_DEPCFG_STREAM_EVENT_EN;
1103 dep->stream_capable = true;
1104 }
1105
1106 /* Set EP number */
1107 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1108
1109 /* Set interrupter number for GSI endpoints */
1110 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1111
1112 /* Enable XferInProgress and XferComplete Interrupts */
1113 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1114 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1115 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1116 /*
1117 * We must use the lower 16 TX FIFOs even though
1118 * HW might have more
1119 */
1120 /* Remove FIFO Number for GSI EP*/
1121 if (dep->direction)
1122 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1123
1124 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1125
1126 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1127 params.param0, params.param1, params.param2, dep->name);
1128
1129	dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
1130
1131 /* Set XferRsc Index for GSI EP */
1132 if (!(dep->flags & DWC3_EP_ENABLED)) {
1133 memset(&params, 0x00, sizeof(params));
1134 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
1135		dwc3_send_gadget_ep_cmd(dep,
1136			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1137
1138 dep->endpoint.desc = desc;
1139 dep->comp_desc = comp_desc;
1140 dep->type = usb_endpoint_type(desc);
1141 dep->flags |= DWC3_EP_ENABLED;
1142 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1143 reg |= DWC3_DALEPENA_EP(dep->number);
1144 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1145 }
1146
1147}
1148
1149/*
1150* Enables USB wrapper for GSI
1151*
1152* @usb_ep - pointer to usb_ep instance.
1153*/
1154static void gsi_enable(struct usb_ep *ep)
1155{
1156 struct dwc3_ep *dep = to_dwc3_ep(ep);
1157 struct dwc3 *dwc = dep->dwc;
1158 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1159
1160 dwc3_msm_write_reg_field(mdwc->base,
1161 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1162 dwc3_msm_write_reg_field(mdwc->base,
1163 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1164 dwc3_msm_write_reg_field(mdwc->base,
1165 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1166 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1167 dwc3_msm_write_reg_field(mdwc->base,
1168 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1169}
1170
1171/*
1172* Block or allow doorbell towards GSI
1173*
1174* @usb_ep - pointer to usb_ep instance.
1175* @request - pointer to GSI request. In this case num_bufs is used as a bool
1176* to set or clear the doorbell bit
1177*/
1178static void gsi_set_clear_dbell(struct usb_ep *ep,
1179 bool block_db)
1180{
1181
1182 struct dwc3_ep *dep = to_dwc3_ep(ep);
1183 struct dwc3 *dwc = dep->dwc;
1184 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1185
1186 dwc3_msm_write_reg_field(mdwc->base,
1187 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1188}
1189
1190/*
1191* Performs necessary checks before stopping GSI channels
1192*
1193* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1194*/
1195static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1196{
1197 u32 timeout = 1500;
1198 u32 reg = 0;
1199 struct dwc3_ep *dep = to_dwc3_ep(ep);
1200 struct dwc3 *dwc = dep->dwc;
1201 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1202
1203 while (dwc3_msm_read_reg_field(mdwc->base,
1204 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1205 if (!timeout--) {
1206 dev_err(mdwc->dev,
1207 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1208 return false;
1209 }
1210 }
1211 /* Check for U3 only if we are not handling Function Suspend */
1212 if (!f_suspend) {
1213 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1214 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1215 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1216 return false;
1217 }
1218 }
1219
1220 return true;
1221}
1222
1223
1224/**
1225* Performs GSI operations or GSI EP related operations.
1226*
1227* @usb_ep - pointer to usb_ep instance.
1228* @op_data - pointer to opcode related data.
1229* @op - GSI related or GSI EP related op code.
1230*
1231* @return int - 0 on success, negative on error.
1232* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1233*/
1234static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1235 void *op_data, enum gsi_ep_op op)
1236{
1237 u32 ret = 0;
1238 struct dwc3_ep *dep = to_dwc3_ep(ep);
1239 struct dwc3 *dwc = dep->dwc;
1240 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1241 struct usb_gsi_request *request;
1242 struct gsi_channel_info *ch_info;
1243 bool block_db, f_suspend;
1244
1245 switch (op) {
1246 case GSI_EP_OP_PREPARE_TRBS:
1247 request = (struct usb_gsi_request *)op_data;
1248 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1249 ret = gsi_prepare_trbs(ep, request);
1250 break;
1251 case GSI_EP_OP_FREE_TRBS:
1252 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1253 gsi_free_trbs(ep);
1254 break;
1255 case GSI_EP_OP_CONFIG:
1256 request = (struct usb_gsi_request *)op_data;
1257 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
1258 gsi_configure_ep(ep, request);
1259 break;
1260 case GSI_EP_OP_STARTXFER:
1261 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
1262 ret = gsi_startxfer_for_ep(ep);
1263 break;
1264 case GSI_EP_OP_GET_XFER_IDX:
1265 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1266 ret = gsi_get_xfer_index(ep);
1267 break;
1268 case GSI_EP_OP_STORE_DBL_INFO:
1269 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1270 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1271 break;
1272 case GSI_EP_OP_ENABLE_GSI:
1273 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1274 gsi_enable(ep);
1275 break;
1276 case GSI_EP_OP_GET_CH_INFO:
1277 ch_info = (struct gsi_channel_info *)op_data;
1278 gsi_get_channel_info(ep, ch_info);
1279 break;
1280 case GSI_EP_OP_RING_IN_DB:
1281 request = (struct usb_gsi_request *)op_data;
1282 dev_dbg(mdwc->dev, "RING IN EP DB\n");
1283 gsi_ring_in_db(ep, request);
1284 break;
1285 case GSI_EP_OP_UPDATEXFER:
1286 request = (struct usb_gsi_request *)op_data;
1287 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
1288 ret = gsi_updatexfer_for_ep(ep, request);
1289 break;
1290 case GSI_EP_OP_ENDXFER:
1291 request = (struct usb_gsi_request *)op_data;
1292 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
1293 gsi_endxfer_for_ep(ep);
1294 break;
1295 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1296 block_db = *((bool *)op_data);
1297 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1298 block_db);
1299 gsi_set_clear_dbell(ep, block_db);
1300 break;
1301 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1302 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1303 f_suspend = *((bool *)op_data);
1304 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1305 break;
1306 case GSI_EP_OP_DISABLE:
1307 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1308 ret = ep->ops->disable(ep);
1309 break;
1310 default:
1311 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1312 }
1313
1314 return ret;
1315}
1316
1317/**
1318 * Configure MSM endpoint.
1319 * This function does configurations specific
1320 * to an endpoint which needs a specific implementation
1321 * in the MSM architecture.
1322 *
1323 * This function should be called by a usb function/class
1324 * layer which needs support from the MSM-specific HW
1325 * wrapping the USB3 core (like GSI or DBM specific endpoints).
1326 *
1327 * @ep - a pointer to some usb_ep instance
1328 *
1329 * @return int - 0 on success, negative on error.
1330 */
1331int msm_ep_config(struct usb_ep *ep)
1332{
1333 struct dwc3_ep *dep = to_dwc3_ep(ep);
1334 struct dwc3 *dwc = dep->dwc;
1335 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1336 struct usb_ep_ops *new_ep_ops;
1337
1338
1339 /* Save original ep ops for future restore*/
1340 if (mdwc->original_ep_ops[dep->number]) {
1341 dev_err(mdwc->dev,
1342 "ep [%s,%d] already configured as msm endpoint\n",
1343 ep->name, dep->number);
1344 return -EPERM;
1345 }
1346 mdwc->original_ep_ops[dep->number] = ep->ops;
1347
1348 /* Set new usb ops as we like */
1349 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1350 if (!new_ep_ops)
1351 return -ENOMEM;
1352
1353 (*new_ep_ops) = (*ep->ops);
1354 new_ep_ops->queue = dwc3_msm_ep_queue;
1355 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1356 ep->ops = new_ep_ops;
1357
1358 /*
1359	 * Add here any further usb endpoint configurations
1360	 * which are specific to MSM.
1361 */
1362
1363 return 0;
1364}
1365EXPORT_SYMBOL(msm_ep_config);
1366
1367/**
1368 * Un-configure MSM endpoint.
1369 * Tear down configurations done in the
1370 * dwc3_msm_ep_config function.
1371 *
1372 * @ep - a pointer to some usb_ep instance
1373 *
1374 * @return int - 0 on success, negative on error.
1375 */
1376int msm_ep_unconfig(struct usb_ep *ep)
1377{
1378 struct dwc3_ep *dep = to_dwc3_ep(ep);
1379 struct dwc3 *dwc = dep->dwc;
1380 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1381 struct usb_ep_ops *old_ep_ops;
1382
1383 /* Restore original ep ops */
1384 if (!mdwc->original_ep_ops[dep->number]) {
1385 dev_err(mdwc->dev,
1386 "ep [%s,%d] was not configured as msm endpoint\n",
1387 ep->name, dep->number);
1388 return -EINVAL;
1389 }
1390 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1391 ep->ops = mdwc->original_ep_ops[dep->number];
1392 mdwc->original_ep_ops[dep->number] = NULL;
1393 kfree(old_ep_ops);
1394
1395 /*
1396	 * Add here any further usb endpoint un-configurations
1397	 * which are specific to MSM.
1398 */
1399
1400 return 0;
1401}
1402EXPORT_SYMBOL(msm_ep_unconfig);
1403#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1404
1405static void dwc3_resume_work(struct work_struct *w);
1406
1407static void dwc3_restart_usb_work(struct work_struct *w)
1408{
1409 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1410 restart_usb_work);
1411 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1412 unsigned int timeout = 50;
1413
1414 dev_dbg(mdwc->dev, "%s\n", __func__);
1415
1416 if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
1417 dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
1418 return;
1419 }
1420
1421 /* guard against concurrent VBUS handling */
1422 mdwc->in_restart = true;
1423
1424 if (!mdwc->vbus_active) {
1425 dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
1426 dwc->err_evt_seen = false;
1427 mdwc->in_restart = false;
1428 return;
1429 }
1430
1431	/* Reset active USB connection */
1432 dwc3_resume_work(&mdwc->resume_work);
1433
1434 /* Make sure disconnect is processed before sending connect */
1435 while (--timeout && !pm_runtime_suspended(mdwc->dev))
1436 msleep(20);
1437
1438 if (!timeout) {
1439 dev_dbg(mdwc->dev,
1440 "Not in LPM after disconnect, forcing suspend...\n");
1441		pm_runtime_suspend(mdwc->dev);
1442 }
1443
1444 /* Force reconnect only if cable is still connected */
1445 if (mdwc->vbus_active) {
1446 mdwc->in_restart = false;
1447 dwc3_resume_work(&mdwc->resume_work);
1448 }
1449
1450 dwc->err_evt_seen = false;
1451 flush_delayed_work(&mdwc->sm_work);
1452}
1453
1454/*
1455 * Check whether the DWC3 requires resetting the ep
1456 * after going to Low Power Mode (lpm)
1457 */
1458bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1459{
1460 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1461 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1462
1463 return dbm_reset_ep_after_lpm(mdwc->dbm);
1464}
1465EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1466
1467/*
1468 * Config Global Distributed Switch Controller (GDSC)
1469 * to support controller power collapse
1470 */
1471static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1472{
1473 int ret;
1474
1475 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1476 return -EPERM;
1477
1478 if (on) {
1479 ret = regulator_enable(mdwc->dwc3_gdsc);
1480 if (ret) {
1481 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1482 return ret;
1483 }
1484 } else {
1485 ret = regulator_disable(mdwc->dwc3_gdsc);
1486 if (ret) {
1487 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1488 return ret;
1489 }
1490 }
1491
1492 return ret;
1493}
1494
1495static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
1496{
1497 int ret = 0;
1498
1499 if (assert) {
1500 disable_irq(mdwc->pwr_event_irq);
1501 /* Using asynchronous block reset to the hardware */
1502 dev_dbg(mdwc->dev, "block_reset ASSERT\n");
1503 clk_disable_unprepare(mdwc->utmi_clk);
1504 clk_disable_unprepare(mdwc->sleep_clk);
1505 clk_disable_unprepare(mdwc->core_clk);
1506 clk_disable_unprepare(mdwc->iface_clk);
1507 ret = clk_reset(mdwc->core_clk, CLK_RESET_ASSERT);
1508 if (ret)
1509 dev_err(mdwc->dev, "dwc3 core_clk assert failed\n");
1510 } else {
1511 dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
1512 ret = clk_reset(mdwc->core_clk, CLK_RESET_DEASSERT);
1513 ndelay(200);
1514 clk_prepare_enable(mdwc->iface_clk);
1515 clk_prepare_enable(mdwc->core_clk);
1516 clk_prepare_enable(mdwc->sleep_clk);
1517 clk_prepare_enable(mdwc->utmi_clk);
1518 if (ret)
1519 dev_err(mdwc->dev, "dwc3 core_clk deassert failed\n");
1520 enable_irq(mdwc->pwr_event_irq);
1521 }
1522
1523 return ret;
1524}
1525
1526static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
1527{
1528 u32 guctl, gfladj = 0;
1529
1530 guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
1531 guctl &= ~DWC3_GUCTL_REFCLKPER;
1532
1533 /* GFLADJ register is used starting with revision 2.50a */
1534 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
1535 gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
1536 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1537 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
1538 gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
1539 gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
1540 }
1541
1542 /* Refer to SNPS Databook Table 6-55 for calculations used */
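	/*
	 * Note: GUCTL's REFCLKPER field holds the reference clock period in
	 * ns, hence 52 for 19.2 MHz (~52.08 ns) and 41 for 24 MHz
	 * (~41.67 ns) below.
	 */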
1543 switch (mdwc->utmi_clk_rate) {
1544 case 19200000:
1545 guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
1546 gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1547 gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1548 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1549 gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1550 break;
1551 case 24000000:
1552 guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
1553 gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1554 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1555 gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1556 break;
1557 default:
1558 dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
1559 mdwc->utmi_clk_rate);
1560 break;
1561 }
1562
1563 dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
1564 if (gfladj)
1565 dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
1566}
1567
1568/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
1569static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
1570{
1571 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
1572 /* On older cores set XHCI_REV bit to specify revision 1.0 */
1573 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1574 BIT(2), 1);
1575
1576 /*
1577 * Enable master clock for RAMs to allow BAM to access RAMs when
1578 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
1579 * are seen where RAM clocks get turned OFF in SS mode
1580 */
1581 dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
1582 dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
1583
1584}
1585
1586static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1587{
1588 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1589 u32 reg;
1590
1591 if (dwc->revision < DWC3_REVISION_230A)
1592 return;
1593
1594 switch (event) {
1595 case DWC3_CONTROLLER_ERROR_EVENT:
1596 dev_info(mdwc->dev,
1597 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1598 dwc->irq_cnt);
1599
1600 dwc3_gadget_disable_irq(dwc);
1601
1602 /* prevent core from generating interrupts until recovery */
1603 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1604 reg |= DWC3_GCTL_CORESOFTRESET;
1605 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1606
1607 /* restart USB which performs full reset and reconnect */
1608 schedule_work(&mdwc->restart_usb_work);
1609 break;
1610 case DWC3_CONTROLLER_RESET_EVENT:
1611 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1612 /* HS & SSPHYs get reset as part of core soft reset */
1613 dwc3_msm_qscratch_reg_init(mdwc);
1614 break;
1615 case DWC3_CONTROLLER_POST_RESET_EVENT:
1616 dev_dbg(mdwc->dev,
1617 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1618
1619 /*
1620		 * The sequence below is used when the controller operates
1621		 * without an SS PHY and only USB high speed is supported.
1622 */
1623 if (dwc->maximum_speed == USB_SPEED_HIGH) {
1624 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1625 dwc3_msm_read_reg(mdwc->base,
1626 QSCRATCH_GENERAL_CFG)
1627 | PIPE_UTMI_CLK_DIS);
1628
1629 usleep_range(2, 5);
1630
1631
1632 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1633 dwc3_msm_read_reg(mdwc->base,
1634 QSCRATCH_GENERAL_CFG)
1635 | PIPE_UTMI_CLK_SEL
1636 | PIPE3_PHYSTATUS_SW);
1637
1638 usleep_range(2, 5);
1639
1640 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1641 dwc3_msm_read_reg(mdwc->base,
1642 QSCRATCH_GENERAL_CFG)
1643 & ~PIPE_UTMI_CLK_DIS);
1644 }
1645
1646 dwc3_msm_update_ref_clk(mdwc);
1647 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1648 break;
1649 case DWC3_CONTROLLER_CONNDONE_EVENT:
1650 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1651 /*
1652 * Add power event if the dbm indicates coming out of L1 by
1653 * interrupt
1654 */
1655 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1656 dwc3_msm_write_reg_field(mdwc->base,
1657 PWR_EVNT_IRQ_MASK_REG,
1658 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1659
1660 atomic_set(&dwc->in_lpm, 0);
1661 break;
1662 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1663 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1664 if (dwc->enable_bus_suspend) {
1665 mdwc->suspend = dwc->b_suspend;
1666 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1667 }
1668 break;
1669 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1670 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
1671 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1672 break;
1673 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1674 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
1675 dwc3_restart_usb_work(&mdwc->restart_usb_work);
1676 break;
1677 default:
1678 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1679 break;
1680 }
1681}
1682
1683static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1684{
1685 int ret = 0;
1686
1687 if (core_reset) {
1688 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1689 if (ret)
1690 return;
1691
1692 usleep_range(1000, 1200);
1693 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1694 if (ret)
1695 return;
1696
1697 usleep_range(10000, 12000);
1698 }
1699
1700 if (mdwc->dbm) {
1701 /* Reset the DBM */
1702 dbm_soft_reset(mdwc->dbm, 1);
1703 usleep_range(1000, 1200);
1704 dbm_soft_reset(mdwc->dbm, 0);
1705
1706 /*enable DBM*/
1707 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1708 DBM_EN_MASK, 0x1);
1709 dbm_enable(mdwc->dbm);
1710 }
1711}
1712
1713static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1714{
1715 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1716 u32 val;
1717
1718 /* Configure AHB2PHY for one wait state read/write */
1719 if (mdwc->ahb2phy_base) {
1720 clk_prepare_enable(mdwc->cfg_ahb_clk);
1721 val = readl_relaxed(mdwc->ahb2phy_base +
1722 PERIPH_SS_AHB2PHY_TOP_CFG);
1723 if (val != ONE_READ_WRITE_WAIT) {
1724 writel_relaxed(ONE_READ_WRITE_WAIT,
1725 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1726 /* complete above write before configuring USB PHY. */
1727 mb();
1728 }
1729 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1730 }
1731
1732 if (!mdwc->init) {
1733		dwc3_core_pre_init(dwc);
1734 mdwc->init = true;
1735 }
1736
1737 dwc3_core_init(dwc);
1738 /* Re-configure event buffers */
1739 dwc3_event_buffers_setup(dwc);
1740}
1741
1742static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
1743{
1744 unsigned long timeout;
1745 u32 reg = 0;
1746
1747 if ((mdwc->in_host_mode || mdwc->vbus_active)
1748 && dwc3_msm_is_superspeed(mdwc)) {
1749 if (!atomic_read(&mdwc->in_p3)) {
1750			dev_err(mdwc->dev, "Not in P3, aborting LPM sequence\n");
1751 return -EBUSY;
1752 }
1753 }
1754
1755 /* Clear previous L2 events */
1756 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
1757 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
1758
1759 /* Prepare HSPHY for suspend */
1760 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
1761 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
1762 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
1763
1764 /* Wait for PHY to go into L2 */
1765 timeout = jiffies + msecs_to_jiffies(5);
1766 while (!time_after(jiffies, timeout)) {
1767 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
1768 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
1769 break;
1770 }
1771 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
1772 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
1773
1774 /* Clear L2 event bit */
1775 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
1776 PWR_EVNT_LPM_IN_L2_MASK);
1777
1778 return 0;
1779}
1780
1781static void dwc3_msm_bus_vote_w(struct work_struct *w)
1782{
1783 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1784 int ret;
1785
1786 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1787 mdwc->bus_vote);
1788 if (ret)
1789 dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
1790}
1791
1792static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
1793{
1794 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1795 int i, num_ports;
1796 u32 reg;
1797
1798 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
1799 if (mdwc->in_host_mode) {
1800 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
1801 num_ports = HCS_MAX_PORTS(reg);
1802 for (i = 0; i < num_ports; i++) {
1803 reg = dwc3_msm_read_reg(mdwc->base,
1804 USB3_PORTSC + i*0x10);
1805 if (reg & PORT_PE) {
1806 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
1807 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1808 else if (DEV_LOWSPEED(reg))
1809 mdwc->hs_phy->flags |= PHY_LS_MODE;
1810 }
1811 }
1812 } else {
1813 if (dwc->gadget.speed == USB_SPEED_HIGH ||
1814 dwc->gadget.speed == USB_SPEED_FULL)
1815 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1816 else if (dwc->gadget.speed == USB_SPEED_LOW)
1817 mdwc->hs_phy->flags |= PHY_LS_MODE;
1818 }
1819}
1820
1821
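/*
 * Enter controller low power mode: bail out if events are pending or
 * the OTG state machine could race with us, quiesce the link, suspend
 * the HS (and, when possible, SS) PHY, gate the clocks, optionally
 * power collapse the controller, drop the bus vote and arm the PHY
 * wakeup interrupts.
 */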
1822static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
1823{
1824 int ret;
1825 bool can_suspend_ssphy;
1826 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1827 struct dwc3_event_buffer *evt;
1828
1829 if (atomic_read(&dwc->in_lpm)) {
1830 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
1831 return 0;
1832 }
1833
1834 if (!mdwc->in_host_mode) {
1835 evt = dwc->ev_buf;
1836 if ((evt->flags & DWC3_EVENT_PENDING)) {
1837 dev_dbg(mdwc->dev,
1838 "%s: %d device events pending, abort suspend\n",
1839 __func__, evt->count / 4);
1840 return -EBUSY;
1841 }
1842 }
1843
1844 if (!mdwc->vbus_active && dwc->is_drd &&
1845 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
1846 /*
1847 * In some cases pm_runtime_suspend may be called by usb_bam
1848 * while an lpm flag is still pending. However, if this happens
1849 * after the cable was disconnected but before the otg state has
1850 * changed to IDLE, the OTG state machine is still running and we
1851 * would race against it. So cancel LPM for now; the OTG state
1852 * machine will enter LPM later, after completing the transition
1853 * to the IDLE state.
1854 */
1855 dev_dbg(mdwc->dev,
1856 "%s: cable disconnected while not in idle otg state\n",
1857 __func__);
1858 return -EBUSY;
1859 }
1860
1861 /*
1862 * If the device is not in the CONFIGURED state while the
1863 * controller is in L2, abort the LPM sequence. This check is
1864 * for the device bus suspend case.
1865 */
1866 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
1867 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
1868 pr_err("%s(): Trying to go in LPM with state:%d\n",
1869 __func__, dwc->gadget.state);
1870 pr_err("%s(): LPM is not performed.\n", __func__);
1871 return -EBUSY;
1872 }
1873
1874 ret = dwc3_msm_prepare_suspend(mdwc);
1875 if (ret)
1876 return ret;
1877
1878 /* Initialize variables here */
1879 can_suspend_ssphy = !(mdwc->in_host_mode &&
1880 dwc3_msm_is_host_superspeed(mdwc));
1881
1882 /* Disable core irq */
1883 if (dwc->irq)
1884 disable_irq(dwc->irq);
1885
1886 /* Disable power event irq; hs and ss phy irqs are used as wakeup sources */
1887 disable_irq(mdwc->pwr_event_irq);
1888
1889 dwc3_set_phy_speed_flags(mdwc);
1890 /* Suspend HS PHY */
1891 usb_phy_set_suspend(mdwc->hs_phy, 1);
1892
1893 /* Suspend SS PHY */
1894 if (can_suspend_ssphy) {
1895 /* indicate phy about SS mode */
1896 if (dwc3_msm_is_superspeed(mdwc))
1897 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
1898 usb_phy_set_suspend(mdwc->ss_phy, 1);
1899 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
1900 }
1901
1902 /* make sure above writes are completed before turning off clocks */
1903 wmb();
1904
1905 /* Disable clocks */
1906 if (mdwc->bus_aggr_clk)
1907 clk_disable_unprepare(mdwc->bus_aggr_clk);
1908 clk_disable_unprepare(mdwc->utmi_clk);
1909
1910 clk_set_rate(mdwc->core_clk, 19200000);
1911 clk_disable_unprepare(mdwc->core_clk);
1912 /*
1913 * Disable iface_clk only after core_clk as core_clk has FSM
1914 * dependency on iface_clk. Hence iface_clk should be turned off
1915 * after core_clk is turned off.
1916 */
1917 clk_disable_unprepare(mdwc->iface_clk);
1918 /* USB PHY no longer requires TCXO */
1919 clk_disable_unprepare(mdwc->xo_clk);
1920
1921 /* Perform controller power collapse */
1922 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
1923 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
1924 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
1925 dwc3_msm_config_gdsc(mdwc, 0);
1926 clk_disable_unprepare(mdwc->sleep_clk);
1927 }
1928
1929 /* Remove bus voting */
1930 if (mdwc->bus_perf_client) {
1931 mdwc->bus_vote = 0;
1932 schedule_work(&mdwc->bus_vote_w);
1933 }
1934
1935 /*
1936 * Release the wakeup source with a timeout to defer system suspend,
1937 * covering the case where both SUSPEND and DISCONNECT events are
1938 * received on USB cable disconnect.
1939 */
1940 if (mdwc->lpm_to_suspend_delay) {
1941 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
1942 mdwc->lpm_to_suspend_delay);
1943 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
1944 } else {
1945 pm_relax(mdwc->dev);
1946 }
1947
1948 atomic_set(&dwc->in_lpm, 1);
1949
1950 /*
1951 * With DCP or during cable disconnect, we don't require wakeup
1952 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
1953 * case of host bus suspend and device bus suspend.
1954 */
1955 if (mdwc->vbus_active || mdwc->in_host_mode) {
1956 enable_irq_wake(mdwc->hs_phy_irq);
1957 enable_irq(mdwc->hs_phy_irq);
1958 if (mdwc->ss_phy_irq) {
1959 enable_irq_wake(mdwc->ss_phy_irq);
1960 enable_irq(mdwc->ss_phy_irq);
1961 }
1962 /*
1963 * Enable power event irq during bus suspend in host mode for
1964 * mapping MPM pin for DP so that wakeup can happen in system
1965 * suspend.
1966 */
1967 if (mdwc->in_host_mode) {
1968 enable_irq(mdwc->pwr_event_irq);
1969 enable_irq_wake(mdwc->pwr_event_irq);
1970 }
1971 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
1972 }
1973
1974 dev_info(mdwc->dev, "DWC3 in low power mode\n");
1975 return 0;
1976}
1977
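/*
 * Bring the controller out of low power mode: restore the bus vote and
 * TCXO, ungate the clocks, resume the PHYs, re-initialize the core if
 * it was power collapsed, disarm the PHY wakeup interrupts and handle
 * any power events that arrived while in LPM.
 */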
1978static int dwc3_msm_resume(struct dwc3_msm *mdwc)
1979{
1980 int ret;
1981 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1982
1983 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
1984
1985 if (!atomic_read(&dwc->in_lpm)) {
1986 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
1987 return 0;
1988 }
1989
1990 pm_stay_awake(mdwc->dev);
1991
1992 /* Enable bus voting */
1993 if (mdwc->bus_perf_client) {
1994 mdwc->bus_vote = 1;
1995 schedule_work(&mdwc->bus_vote_w);
1996 }
1997
1998 /* Vote for TCXO while waking up USB HSPHY */
1999 ret = clk_prepare_enable(mdwc->xo_clk);
2000 if (ret)
2001 dev_err(mdwc->dev, "%s failed to vote TCXO buffer %d\n",
2002 __func__, ret);
2003
2004 /* Restore controller power collapse */
2005 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2006 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2007 dwc3_msm_config_gdsc(mdwc, 1);
2008 clk_reset(mdwc->core_clk, CLK_RESET_ASSERT);
2009 /* HW requires a short delay for reset to take place properly */
2010 usleep_range(1000, 1200);
2011 clk_reset(mdwc->core_clk, CLK_RESET_DEASSERT);
2012 clk_prepare_enable(mdwc->sleep_clk);
2013 }
2014
2015 /*
2016 * Enable clocks
2017 * Turn ON iface_clk before core_clk due to FSM dependency.
2018 */
2019 clk_prepare_enable(mdwc->iface_clk);
2020 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2021 clk_prepare_enable(mdwc->core_clk);
2022 clk_prepare_enable(mdwc->utmi_clk);
2023 if (mdwc->bus_aggr_clk)
2024 clk_prepare_enable(mdwc->bus_aggr_clk);
2025
2026 /* Resume SS PHY */
2027 if (mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
2028 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2029 if (mdwc->typec_orientation == ORIENTATION_CC1)
2030 mdwc->ss_phy->flags |= PHY_LANE_A;
2031 if (mdwc->typec_orientation == ORIENTATION_CC2)
2032 mdwc->ss_phy->flags |= PHY_LANE_B;
2033 usb_phy_set_suspend(mdwc->ss_phy, 0);
2034 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2035 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2036 }
2037
2038 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2039 /* Resume HS PHY */
2040 usb_phy_set_suspend(mdwc->hs_phy, 0);
2041
2042 /* Recover from controller power collapse */
2043 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2044 u32 tmp;
2045
2046 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2047
2048 dwc3_msm_power_collapse_por(mdwc);
2049
2050 /* Get initial P3 status and enable IN_P3 event */
2051 tmp = dwc3_msm_read_reg_field(mdwc->base,
2052 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2053 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2054 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2055 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2056
2057 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
2058 }
2059
2060 atomic_set(&dwc->in_lpm, 0);
2061
2062 /* Disable HSPHY auto suspend */
2063 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2064 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2065 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2066 DWC3_GUSB2PHYCFG_SUSPHY));
2067
2068 /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
2069 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
2070 disable_irq_wake(mdwc->hs_phy_irq);
2071 disable_irq_nosync(mdwc->hs_phy_irq);
2072 if (mdwc->ss_phy_irq) {
2073 disable_irq_wake(mdwc->ss_phy_irq);
2074 disable_irq_nosync(mdwc->ss_phy_irq);
2075 }
2076 if (mdwc->in_host_mode) {
2077 disable_irq_wake(mdwc->pwr_event_irq);
2078 disable_irq(mdwc->pwr_event_irq);
2079 }
2080 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2081 }
2082
2083 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2084
2085 /* enable power evt irq for IN P3 detection */
2086 enable_irq(mdwc->pwr_event_irq);
2087
2088 /* Enable core irq */
2089 if (dwc->irq)
2090 enable_irq(dwc->irq);
2091
2092 /*
2093 * Handle other power events that could not have been handled during
2094 * Low Power Mode
2095 */
2096 dwc3_pwr_event_handler(mdwc);
2097
2098 return 0;
2099}
2100
2101/**
2102 * dwc3_ext_event_notify - callback to handle events from external transceiver
2103 *
2104 * Updates the OTG inputs (ID, BSV, SUSPEND) and reschedules the sm_work.
2105 */
2106static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2107{
2108 /* Flush processing any pending events before handling new ones */
2109 flush_delayed_work(&mdwc->sm_work);
2110
2111 if (mdwc->id_state == DWC3_ID_FLOAT) {
2112 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2113 set_bit(ID, &mdwc->inputs);
2114 } else {
2115 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2116 clear_bit(ID, &mdwc->inputs);
2117 }
2118
2119 if (mdwc->vbus_active && !mdwc->in_restart) {
2120 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2121 set_bit(B_SESS_VLD, &mdwc->inputs);
2122 } else {
2123 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2124 clear_bit(B_SESS_VLD, &mdwc->inputs);
2125 }
2126
2127 if (mdwc->suspend) {
2128 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2129 set_bit(B_SUSPEND, &mdwc->inputs);
2130 } else {
2131 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2132 clear_bit(B_SUSPEND, &mdwc->inputs);
2133 }
2134
2135 schedule_delayed_work(&mdwc->sm_work, 0);
2136}
2137
2138static void dwc3_resume_work(struct work_struct *w)
2139{
2140 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
2141
2142 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2143
2144 /*
2145 * exit LPM first to meet resume timeline from device side.
2146 * resume_pending flag would prevent calling
2147 * dwc3_msm_resume() in case we are here due to system
2148 * wide resume without usb cable connected. This flag is set
2149 * only in case of power event irq in lpm.
2150 */
2151 if (mdwc->resume_pending) {
2152 dwc3_msm_resume(mdwc);
2153 mdwc->resume_pending = false;
2154 }
2155
2156 if (atomic_read(&mdwc->pm_suspended))
2157 /* let pm resume kick in resume work later */
2158 return;
2159 dwc3_ext_event_notify(mdwc);
2160}
2161
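/*
 * Decode PWR_EVNT_IRQ_STAT_REG: track P3 entry/exit in mdwc->in_p3,
 * issue a remote wakeup on L1 exit, and acknowledge only the bits that
 * were actually handled.
 */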
2162static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2163{
2164 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2165 u32 irq_stat, irq_clear = 0;
2166
2167 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2168 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2169
2170 /* Check for P3 events */
2171 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2172 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2173 /* Can't tell if entered or exit P3, so check LINKSTATE */
2174 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2175 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2176 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2177 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2178
2179 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2180 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2181 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2182 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2183 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2184 atomic_set(&mdwc->in_p3, 0);
2185 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2186 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2187 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2188 atomic_set(&mdwc->in_p3, 1);
2189 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2190 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2191 }
2192
2193 /* Clear L2 exit */
2194 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2195 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2196 irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
2197 }
2198
2199 /* Handle exit from L1 events */
2200 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2201 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2202 __func__);
2203 if (usb_gadget_wakeup(&dwc->gadget))
2204 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2205 __func__);
2206 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2207 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2208 }
2209
2210 /* Unhandled events */
2211 if (irq_stat)
2212 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2213 __func__, irq_stat);
2214
2215 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2216}
2217
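/*
 * Threaded half of the power event interrupt: if the controller is in
 * LPM, resume it synchronously; otherwise just service the pending
 * power events.
 */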
2218static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2219{
2220 struct dwc3_msm *mdwc = _mdwc;
2221 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2222
2223 dev_dbg(mdwc->dev, "%s\n", __func__);
2224
2225 if (atomic_read(&dwc->in_lpm))
2226 dwc3_resume_work(&mdwc->resume_work);
2227 else
2228 dwc3_pwr_event_handler(mdwc);
2229
2230 return IRQ_HANDLED;
2231}
2232
2233static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2234{
2235 struct dwc3_msm *mdwc = data;
2236 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2237
2238 dwc->t_pwr_evt_irq = ktime_get();
2239 dev_dbg(mdwc->dev, "%s received\n", __func__);
2240 /*
2241 * When in Low Power Mode, we can't read PWR_EVNT_IRQ_STAT_REG to ascertain
2242 * which interrupts have been triggered, as the clocks are disabled.
2243 * Resume the controller by waking up the pwr event irq thread. After re-enabling
2244 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2245 * all other power events.
2246 */
2247 if (atomic_read(&dwc->in_lpm)) {
2248 /* set this to call dwc3_msm_resume() */
2249 mdwc->resume_pending = true;
2250 return IRQ_WAKE_THREAD;
2251 }
2252
2253 dwc3_pwr_event_handler(mdwc);
2254 return IRQ_HANDLED;
2255}
2256
2257static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2258 unsigned long action, void *hcpu)
2259{
2260 uint32_t cpu = (uintptr_t)hcpu;
2261 struct dwc3_msm *mdwc =
2262 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2263
2264 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2265 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2266 cpu_to_affin, mdwc->irq_to_affin);
2267 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2268 }
2269
2270 return NOTIFY_OK;
2271}
2272
2273static void dwc3_otg_sm_work(struct work_struct *w);
2274
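/*
 * Acquire the USB GDSC and the clocks described in DT (xo, iface, core,
 * sleep, utmi, plus optional bus_aggr and cfg_ahb) and program their
 * initial rates. The GDSC and bus_aggr_clk are treated as optional.
 */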
2275static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2276{
2277 int ret;
2278
2279 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2280 if (IS_ERR(mdwc->dwc3_gdsc))
2281 mdwc->dwc3_gdsc = NULL;
2282
2283 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2284 if (IS_ERR(mdwc->xo_clk)) {
2285 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2286 __func__);
2287 ret = PTR_ERR(mdwc->xo_clk);
2288 return ret;
2289 }
2290 clk_set_rate(mdwc->xo_clk, 19200000);
2291
2292 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2293 if (IS_ERR(mdwc->iface_clk)) {
2294 dev_err(mdwc->dev, "failed to get iface_clk\n");
2295 ret = PTR_ERR(mdwc->iface_clk);
2296 return ret;
2297 }
2298
2299 /*
2300 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
2301 * run at 125 MHz in SSUSB mode and >60 MHz for HSUSB mode.
2302 * On newer platform it can run at 150MHz as well.
2303 */
2304 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2305 if (IS_ERR(mdwc->core_clk)) {
2306 dev_err(mdwc->dev, "failed to get core_clk\n");
2307 ret = PTR_ERR(mdwc->core_clk);
2308 return ret;
2309 }
2310
2311 /*
2312 * Get Max supported clk frequency for USB Core CLK and request
2313 * to set the same.
2314 */
2315 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
2316 if (IS_ERR_VALUE(mdwc->core_clk_rate)) {
2317 dev_err(mdwc->dev, "fail to get core clk max freq.\n");
2318 } else {
2319 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2320 if (ret)
2321 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n",
2322 ret);
2323 }
2324
2325 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2326 if (IS_ERR(mdwc->sleep_clk)) {
2327 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2328 ret = PTR_ERR(mdwc->sleep_clk);
2329 return ret;
2330 }
2331
2332 clk_set_rate(mdwc->sleep_clk, 32000);
2333 mdwc->utmi_clk_rate = 19200000;
2334 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2335 if (IS_ERR(mdwc->utmi_clk)) {
2336 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2337 ret = PTR_ERR(mdwc->utmi_clk);
2338 return ret;
2339 }
2340
2341 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2342 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2343 if (IS_ERR(mdwc->bus_aggr_clk))
2344 mdwc->bus_aggr_clk = NULL;
2345
2346 if (of_property_match_string(mdwc->dev->of_node,
2347 "clock-names", "cfg_ahb_clk") >= 0) {
2348 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2349 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2350 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2351 mdwc->cfg_ahb_clk = NULL;
2352 if (ret != -EPROBE_DEFER)
2353 dev_err(mdwc->dev,
2354 "failed to get cfg_ahb_clk ret %d\n",
2355 ret);
2356 return ret;
2357 }
2358 }
2359
2360 return 0;
2361}
2362
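/*
 * extcon EXTCON_USB_HOST notifier: derive the Type-C orientation from
 * EXTCON_USB_CC, update the ID state and kick the resume work when the
 * state has changed.
 */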
2363static int dwc3_msm_id_notifier(struct notifier_block *nb,
2364 unsigned long event, void *ptr)
2365{
2366 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
2367 struct extcon_dev *edev = ptr;
2368 enum dwc3_id_state id;
2369 int cc_state;
2370
2371 if (!edev) {
2372 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2373 goto done;
2374 }
2375
2376 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2377
2378 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2379
2380 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2381 if (cc_state < 0)
2382 mdwc->typec_orientation = ORIENTATION_NONE;
2383 else
2384 mdwc->typec_orientation =
2385 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2386
2387 if (mdwc->id_state != id) {
2388 mdwc->id_state = id;
2389 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2390 }
2391
2392done:
2393 return NOTIFY_DONE;
2394}
2395
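/*
 * extcon EXTCON_USB notifier: derive the Type-C orientation from
 * EXTCON_USB_CC, update vbus_active and, for dual-role configurations,
 * kick the resume work unless a restart is in progress.
 */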
2396static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2397 unsigned long event, void *ptr)
2398{
2399 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2400 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2401 struct extcon_dev *edev = ptr;
2402 int cc_state;
2403
2404 if (!edev) {
2405 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2406 goto done;
2407 }
2408
2409 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2410
2411 if (mdwc->vbus_active == event)
2412 return NOTIFY_DONE;
2413
2414 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2415 if (cc_state < 0)
2416 mdwc->typec_orientation = ORIENTATION_NONE;
2417 else
2418 mdwc->typec_orientation =
2419 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2420
2421 mdwc->vbus_active = event;
2422 if (dwc->is_drd && !mdwc->in_restart)
2423 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2424done:
2425 return NOTIFY_DONE;
2426}
2427
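/*
 * Register for VBUS (EXTCON_USB) and, when a second "extcon" phandle is
 * provided, ID (EXTCON_USB_HOST) notifications. Returns 0 if the node
 * has no extcon property.
 */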
2428static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
2429{
2430 struct device_node *node = mdwc->dev->of_node;
2431 struct extcon_dev *edev;
2432 int ret = 0;
2433
2434 if (!of_property_read_bool(node, "extcon"))
2435 return 0;
2436
2437 edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
2438 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2439 return PTR_ERR(edev);
2440
2441 if (!IS_ERR(edev)) {
2442 mdwc->extcon_vbus = edev;
2443 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2444 ret = extcon_register_notifier(edev, EXTCON_USB,
2445 &mdwc->vbus_nb);
2446 if (ret < 0) {
2447 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2448 return ret;
2449 }
2450 }
2451
2452 /* if a second phandle was provided, use it to get a separate edev */
2453 if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
2454 edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
2455 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2456 ret = PTR_ERR(edev);
2457 goto err;
2458 }
2459 }
2460
2461 if (!IS_ERR(edev)) {
2462 mdwc->extcon_id = edev;
2463 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
2464 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2465 &mdwc->id_nb);
2466 if (ret < 0) {
2467 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2468 goto err;
2469 }
2470 }
2471
2472 return 0;
2473err:
2474 if (mdwc->extcon_vbus)
2475 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
2476 &mdwc->vbus_nb);
2477 return ret;
2478}
2479
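/*
 * sysfs "mode" attribute: reports the current role (host, peripheral or
 * none) and allows overriding it by faking the ID/VBUS inputs and
 * re-running the OTG state machine.
 */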
2480static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2481 char *buf)
2482{
2483 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2484
2485 if (mdwc->vbus_active)
2486 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2487 if (mdwc->id_state == DWC3_ID_GROUND)
2488 return snprintf(buf, PAGE_SIZE, "host\n");
2489
2490 return snprintf(buf, PAGE_SIZE, "none\n");
2491}
2492
2493static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2494 const char *buf, size_t count)
2495{
2496 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2497
2498 if (sysfs_streq(buf, "peripheral")) {
2499 mdwc->vbus_active = true;
2500 mdwc->id_state = DWC3_ID_FLOAT;
2501 } else if (sysfs_streq(buf, "host")) {
2502 mdwc->vbus_active = false;
2503 mdwc->id_state = DWC3_ID_GROUND;
2504 } else {
2505 mdwc->vbus_active = false;
2506 mdwc->id_state = DWC3_ID_FLOAT;
2507 }
2508
2509 dwc3_ext_event_notify(mdwc);
2510
2511 return count;
2512}
2513
2514static DEVICE_ATTR_RW(mode);
2515
2516static int dwc3_msm_probe(struct platform_device *pdev)
2517{
2518 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2519 struct device *dev = &pdev->dev;
2520 struct dwc3_msm *mdwc;
2521 struct dwc3 *dwc;
2522 struct resource *res;
2523 void __iomem *tcsr;
2524 bool host_mode;
2525 int ret = 0;
2526 int ext_hub_reset_gpio;
2527 u32 val;
2528
2529 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2530 if (!mdwc)
2531 return -ENOMEM;
2532
2533 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2534 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2535 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2536 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2537 return -EOPNOTSUPP;
2538 }
2539 }
2540
2541 platform_set_drvdata(pdev, mdwc);
2542 mdwc->dev = &pdev->dev;
2543
2544 INIT_LIST_HEAD(&mdwc->req_complete_list);
2545 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2546 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2547 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
2548 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
2549
2550 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2551 if (!mdwc->dwc3_wq) {
2552 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2553 return -ENOMEM;
2554 }
2555
2556 /* Get all clks and gdsc reference */
2557 ret = dwc3_msm_get_clk_gdsc(mdwc);
2558 if (ret) {
2559 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2560 return ret;
2561 }
2562
2563 mdwc->id_state = DWC3_ID_FLOAT;
2564 set_bit(ID, &mdwc->inputs);
2565
2566 mdwc->charging_disabled = of_property_read_bool(node,
2567 "qcom,charging-disabled");
2568
2569 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2570 &mdwc->lpm_to_suspend_delay);
2571 if (ret) {
2572 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2573 mdwc->lpm_to_suspend_delay = 0;
2574 }
2575
2576 /*
2577 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2578 * DP and DM linestate transitions during low power mode.
2579 */
2580 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2581 if (mdwc->hs_phy_irq < 0) {
2582 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2583 ret = -EINVAL;
2584 goto err;
2585 } else {
2586 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2587 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2588 msm_dwc3_pwr_irq,
2589 msm_dwc3_pwr_irq_thread,
2590 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2591 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2592 if (ret) {
2593 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2594 ret);
2595 goto err;
2596 }
2597 }
2598
2599 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
2600 if (mdwc->ss_phy_irq < 0) {
2601 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
2602 } else {
2603 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
2604 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
2605 msm_dwc3_pwr_irq,
2606 msm_dwc3_pwr_irq_thread,
2607 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2608 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
2609 if (ret) {
2610 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
2611 ret);
2612 goto err;
2613 }
2614 }
2615
2616 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
2617 if (mdwc->pwr_event_irq < 0) {
2618 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
2619 ret = -EINVAL;
2620 goto err;
2621 } else {
2622 /* will be enabled in dwc3_msm_resume() */
2623 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
2624 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
2625 msm_dwc3_pwr_irq,
2626 msm_dwc3_pwr_irq_thread,
2627 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
2628 "msm_dwc3", mdwc);
2629 if (ret) {
2630 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
2631 ret);
2632 goto err;
2633 }
2634 }
2635
2636 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
2637 if (!res) {
2638 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
2639 } else {
2640 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
2641 resource_size(res));
2642 if (IS_ERR_OR_NULL(tcsr)) {
2643 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
2644 } else {
2645 /* Enable USB3 on the primary USB port. */
2646 writel_relaxed(0x1, tcsr);
2647 /*
2648 * Ensure that TCSR write is completed before
2649 * USB registers initialization.
2650 */
2651 mb();
2652 }
2653 }
2654
2655 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
2656 if (!res) {
2657 dev_err(&pdev->dev, "missing memory base resource\n");
2658 ret = -ENODEV;
2659 goto err;
2660 }
2661
2662 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
2663 resource_size(res));
2664 if (!mdwc->base) {
2665 dev_err(&pdev->dev, "ioremap failed\n");
2666 ret = -ENODEV;
2667 goto err;
2668 }
2669
2670 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2671 "ahb2phy_base");
2672 if (res) {
2673 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
2674 res->start, resource_size(res));
2675 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
2676 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
2677 mdwc->ahb2phy_base = NULL;
2678 } else {
2679 /*
2680 * On some targets cfg_ahb_clk depends upon usb gdsc
2681 * regulator. If cfg_ahb_clk is enabled without
2682 * turning on the usb gdsc regulator, the clk is stuck off.
2683 */
2684 dwc3_msm_config_gdsc(mdwc, 1);
2685 clk_prepare_enable(mdwc->cfg_ahb_clk);
2686 /* Configure AHB2PHY for one wait state read/write*/
2687 val = readl_relaxed(mdwc->ahb2phy_base +
2688 PERIPH_SS_AHB2PHY_TOP_CFG);
2689 if (val != ONE_READ_WRITE_WAIT) {
2690 writel_relaxed(ONE_READ_WRITE_WAIT,
2691 mdwc->ahb2phy_base +
2692 PERIPH_SS_AHB2PHY_TOP_CFG);
2693 /* complete above write before using USB PHY */
2694 mb();
2695 }
2696 clk_disable_unprepare(mdwc->cfg_ahb_clk);
2697 dwc3_msm_config_gdsc(mdwc, 0);
2698 }
2699 }
2700
2701 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
2702 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
2703 if (IS_ERR(mdwc->dbm)) {
2704 dev_err(&pdev->dev, "unable to get dbm device\n");
2705 ret = -EPROBE_DEFER;
2706 goto err;
2707 }
2708 /*
2709 * Add power event if the dbm indicates coming out of L1
2710 * by interrupt
2711 */
2712 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
2713 if (!mdwc->pwr_event_irq) {
2714 dev_err(&pdev->dev,
2715 "need pwr_event_irq exiting L1\n");
2716 ret = -EINVAL;
2717 goto err;
2718 }
2719 }
2720 }
2721
2722 ext_hub_reset_gpio = of_get_named_gpio(node,
2723 "qcom,ext-hub-reset-gpio", 0);
2724
2725 if (gpio_is_valid(ext_hub_reset_gpio)
2726 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
2727 "qcom,ext-hub-reset-gpio"))) {
2728 /* reset external hub */
2729 gpio_direction_output(ext_hub_reset_gpio, 1);
2730 /*
2731 * Hub reset should be asserted for minimum 5microsec
2732 * before deasserting.
2733 */
2734 usleep_range(5, 1000);
2735 gpio_direction_output(ext_hub_reset_gpio, 0);
2736 }
2737
2738 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
2739 &mdwc->tx_fifo_size))
2740 dev_err(&pdev->dev,
2741 "unable to read platform data tx fifo size\n");
2742
2743 mdwc->disable_host_mode_pm = of_property_read_bool(node,
2744 "qcom,disable-host-mode-pm");
2745
2746 dwc3_set_notifier(&dwc3_msm_notify_event);
2747
2748 /* Assumes dwc3 is the first DT child of dwc3-msm */
2749 dwc3_node = of_get_next_available_child(node, NULL);
2750 if (!dwc3_node) {
2751 dev_err(&pdev->dev, "failed to find dwc3 child\n");
2752 ret = -ENODEV;
2753 goto err;
2754 }
2755
2756 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2757 if (ret) {
2758 dev_err(&pdev->dev,
2759 "failed to create dwc3 core\n");
2760 of_node_put(dwc3_node);
2761 goto err;
2762 }
2763
2764 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
2765 of_node_put(dwc3_node);
2766 if (!mdwc->dwc3) {
2767 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
2768 goto put_dwc3;
2769 }
2770
2771 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2772 "usb-phy", 0);
2773 if (IS_ERR(mdwc->hs_phy)) {
2774 dev_err(&pdev->dev, "unable to get hsphy device\n");
2775 ret = PTR_ERR(mdwc->hs_phy);
2776 goto put_dwc3;
2777 }
2778 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2779 "usb-phy", 1);
2780 if (IS_ERR(mdwc->ss_phy)) {
2781 dev_err(&pdev->dev, "unable to get ssphy device\n");
2782 ret = PTR_ERR(mdwc->ss_phy);
2783 goto put_dwc3;
2784 }
2785
2786 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
2787 if (mdwc->bus_scale_table) {
2788 mdwc->bus_perf_client =
2789 msm_bus_scale_register_client(mdwc->bus_scale_table);
2790 }
2791
2792 dwc = platform_get_drvdata(mdwc->dwc3);
2793 if (!dwc) {
2794 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
2795 goto put_dwc3;
2796 }
2797
2798 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
2799 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
2800
2801 if (cpu_to_affin)
2802 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
2803
2804 /*
2805 * Clocks and regulators will not be turned on until the first time
2806 * runtime PM resume is called. This is to allow for booting up with
2807 * charger already connected so as not to disturb PHY line states.
2808 */
2809 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
2810 atomic_set(&dwc->in_lpm, 1);
2811 pm_runtime_set_suspended(mdwc->dev);
2812 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
2813 pm_runtime_use_autosuspend(mdwc->dev);
2814 pm_runtime_enable(mdwc->dev);
2815 device_init_wakeup(mdwc->dev, 1);
2816
2817 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
2818 pm_runtime_get_noresume(mdwc->dev);
2819
2820 ret = dwc3_msm_extcon_register(mdwc);
2821 if (ret)
2822 goto put_dwc3;
2823
2824 /* Update initial VBUS/ID state from extcon */
2825 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
2826 EXTCON_USB))
2827 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
2828 if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
2829 EXTCON_USB_HOST))
2830 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
2831
2832 device_create_file(&pdev->dev, &dev_attr_mode);
2833
2834 schedule_delayed_work(&mdwc->sm_work, 0);
2835
2836 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
2837 if (!dwc->is_drd && host_mode) {
2838 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
2839 mdwc->id_state = DWC3_ID_GROUND;
2840 dwc3_ext_event_notify(mdwc);
2841 }
2842
2843 return 0;
2844
2845put_dwc3:
2846 platform_device_put(mdwc->dwc3);
2847 if (mdwc->bus_perf_client)
2848 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
2849err:
2850 return ret;
2851}
2852
2853static int dwc3_msm_remove_children(struct device *dev, void *data)
2854{
2855 device_unregister(dev);
2856 return 0;
2857}
2858
2859static int dwc3_msm_remove(struct platform_device *pdev)
2860{
2861 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
2862 int ret_pm;
2863
2864 device_remove_file(&pdev->dev, &dev_attr_mode);
2865
2866 if (cpu_to_affin)
2867 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
2868
2869 /*
2870 * In case of system suspend, pm_runtime_get_sync fails.
2871 * Hence turn ON the clocks manually.
2872 */
2873 ret_pm = pm_runtime_get_sync(mdwc->dev);
2874 if (ret_pm < 0) {
2875 dev_err(mdwc->dev,
2876 "pm_runtime_get_sync failed with %d\n", ret_pm);
2877 clk_prepare_enable(mdwc->utmi_clk);
2878 clk_prepare_enable(mdwc->core_clk);
2879 clk_prepare_enable(mdwc->iface_clk);
2880 clk_prepare_enable(mdwc->sleep_clk);
2881 if (mdwc->bus_aggr_clk)
2882 clk_prepare_enable(mdwc->bus_aggr_clk);
2883 clk_prepare_enable(mdwc->xo_clk);
2884 }
2885
2886 cancel_delayed_work_sync(&mdwc->sm_work);
2887
2888 if (mdwc->hs_phy)
2889 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
2890 platform_device_put(mdwc->dwc3);
2891 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
2892
2893 pm_runtime_disable(mdwc->dev);
2894 pm_runtime_barrier(mdwc->dev);
2895 pm_runtime_put_sync(mdwc->dev);
2896 pm_runtime_set_suspended(mdwc->dev);
2897 device_wakeup_disable(mdwc->dev);
2898
2899 if (mdwc->bus_perf_client)
2900 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
2901
2902 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
2903 regulator_disable(mdwc->vbus_reg);
2904
2905 disable_irq(mdwc->hs_phy_irq);
2906 if (mdwc->ss_phy_irq)
2907 disable_irq(mdwc->ss_phy_irq);
2908 disable_irq(mdwc->pwr_event_irq);
2909
2910 clk_disable_unprepare(mdwc->utmi_clk);
2911 clk_set_rate(mdwc->core_clk, 19200000);
2912 clk_disable_unprepare(mdwc->core_clk);
2913 clk_disable_unprepare(mdwc->iface_clk);
2914 clk_disable_unprepare(mdwc->sleep_clk);
2915 clk_disable_unprepare(mdwc->xo_clk);
2916 clk_put(mdwc->xo_clk);
2917
2918 dwc3_msm_config_gdsc(mdwc, 0);
2919
2920 return 0;
2921}
2922
2923#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
2924
2925/**
2926 * dwc3_otg_start_host - helper function for starting/stoping the host
2927 * controller driver.
2928 *
2929 * @mdwc: Pointer to the dwc3_msm structure.
2930 * @on: start / stop the host controller driver.
2931 *
2932 * Returns 0 on success otherwise negative errno.
2933 */
2934static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
2935{
2936 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2937 int ret = 0;
2938
2939 if (!dwc->xhci)
2940 return -EINVAL;
2941
2942 /*
2943 * The vbus_reg pointer could have multiple values
2944 * NULL: regulator_get() hasn't been called, or was previously deferred
2945 * IS_ERR: regulator could not be obtained, so skip using it
2946 * Valid pointer otherwise
2947 */
2948 if (!mdwc->vbus_reg) {
2949 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
2950 "vbus_dwc3");
2951 if (IS_ERR(mdwc->vbus_reg) &&
2952 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
2953 /* regulators may not be ready, so retry again later */
2954 mdwc->vbus_reg = NULL;
2955 return -EPROBE_DEFER;
2956 }
2957 }
2958
2959 if (on) {
2960 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
2961
2962 pm_runtime_get_sync(mdwc->dev);
2963 mdwc->hs_phy->flags |= PHY_HOST_MODE;
2964 mdwc->ss_phy->flags |= PHY_HOST_MODE;
2965 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
2966 if (!IS_ERR(mdwc->vbus_reg))
2967 ret = regulator_enable(mdwc->vbus_reg);
2968 if (ret) {
2969 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
2970 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
2971 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
2972 pm_runtime_put_sync(mdwc->dev);
2973 return ret;
2974 }
2975
2976 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
2977
2978 /*
2979 * FIXME If micro A cable is disconnected during system suspend,
2980 * xhci platform device will be removed before runtime pm is
2981 * enabled for xhci device. Due to this, disable_depth becomes
2982 * greater than one and runtimepm is not enabled for next microA
2983 * connect. Fix this by calling pm_runtime_init for xhci device.
2984 */
2985 pm_runtime_init(&dwc->xhci->dev);
2986 ret = platform_device_add(dwc->xhci);
2987 if (ret) {
2988 dev_err(mdwc->dev,
2989 "%s: failed to add XHCI pdev ret=%d\n",
2990 __func__, ret);
2991 if (!IS_ERR(mdwc->vbus_reg))
2992 regulator_disable(mdwc->vbus_reg);
2993 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
2994 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
2995 pm_runtime_put_sync(mdwc->dev);
2996 return ret;
2997 }
2998
2999 /*
3000 * In some cases it is observed that USB PHY is not going into
3001 * suspend with host mode suspend functionality. Hence disable
3002 * XHCI's runtime PM here if disable_host_mode_pm is set.
3003 */
3004 if (mdwc->disable_host_mode_pm)
3005 pm_runtime_disable(&dwc->xhci->dev);
3006
3007 mdwc->in_host_mode = true;
3008 dwc3_usb3_phy_suspend(dwc, true);
3009
3010 /* xHCI should have incremented child count as necessary */
3011 pm_runtime_mark_last_busy(mdwc->dev);
3012 pm_runtime_put_sync_autosuspend(mdwc->dev);
3013 } else {
3014 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3015
3016 if (!IS_ERR(mdwc->vbus_reg))
3017 ret = regulator_disable(mdwc->vbus_reg);
3018 if (ret) {
3019 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3020 return ret;
3021 }
3022
3023 pm_runtime_get_sync(mdwc->dev);
3024 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3025 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3026 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3027 platform_device_del(dwc->xhci);
3028
3029 /*
3030 * Perform USB hardware RESET (both core reset and DBM reset)
3031 * when moving from host to peripheral. This is required for
3032 * peripheral mode to work.
3033 */
3034 dwc3_msm_block_reset(mdwc, true);
3035
3036 dwc3_usb3_phy_suspend(dwc, false);
3037 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3038
3039 mdwc->in_host_mode = false;
3040
3041 /* re-init core and OTG registers as block reset clears these */
3042 dwc3_post_host_reset_core_init(dwc);
3043 pm_runtime_mark_last_busy(mdwc->dev);
3044 pm_runtime_put_sync_autosuspend(mdwc->dev);
3045 }
3046
3047 return 0;
3048}
3049
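/*
 * Mirror the VBUS session-valid state into the QSCRATCH HS PHY control
 * register and, when SuperSpeed is supported, the SS PHY control
 * register, so the controller sees the software-driven session state.
 */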
3050static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3051{
3052 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3053
3054 /* Update OTG VBUS Valid from HSPHY to controller */
3055 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3056 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3057 UTMI_OTG_VBUS_VALID,
3058 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3059
3060 /* Update only if Super Speed is supported */
3061 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3062 /* Update VBUS Valid from SSPHY to controller */
3063 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3064 LANE0_PWR_PRESENT,
3065 vbus_present ? LANE0_PWR_PRESENT : 0);
3066 }
3067}
3068
3069/**
3070 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3071 *
3072 * @mdwc: Pointer to the dwc3_msm structure.
3073 * @on: Turn ON/OFF the gadget.
3074 *
3075 * Returns 0 on success otherwise negative errno.
3076 */
3077static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3078{
3079 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3080
3081 pm_runtime_get_sync(mdwc->dev);
3082
3083 if (on) {
3084 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3085 __func__, dwc->gadget.name);
3086
3087 dwc3_override_vbus_status(mdwc, true);
3088 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3089 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3090
3091 /*
3092 * Core reset is not required during start peripheral. Only
3093 * DBM reset is required, hence perform only DBM reset here.
3094 */
3095 dwc3_msm_block_reset(mdwc, false);
3096
3097 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3098 usb_gadget_vbus_connect(&dwc->gadget);
3099 } else {
3100 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3101 __func__, dwc->gadget.name);
3102 usb_gadget_vbus_disconnect(&dwc->gadget);
3103 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3104 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3105 dwc3_override_vbus_status(mdwc, false);
3106 dwc3_usb3_phy_suspend(dwc, false);
3107 }
3108
3109 pm_runtime_put_sync(mdwc->dev);
3110
3111 return 0;
3112}
3113
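/*
 * Report the current budget negotiated over USB to the "usb" power
 * supply: enable or disable charging based on mA and set the new
 * current limit (in uA). No-op if charging is disabled or unchanged.
 */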
3114static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3115{
3116 union power_supply_propval pval = {0,};
3117
3118 if (mdwc->charging_disabled)
3119 return 0;
3120
3121 if (mdwc->max_power == mA)
3122 return 0;
3123
3124 if (!mdwc->usb_psy) {
3125 mdwc->usb_psy = power_supply_get_by_name("usb");
3126 if (!mdwc->usb_psy) {
3127 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3128 return -ENODEV;
3129 }
3130 }
3131
3132 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3133
3134 if (mdwc->max_power <= 2 && mA > 2) {
3135 /* Enable Charging */
3136 pval.intval = true;
3137 if (power_supply_set_property(mdwc->usb_psy,
3138 POWER_SUPPLY_PROP_ONLINE, &pval))
3139 goto psy_error;
3140 pval.intval = 1000 * mA;
3141 if (power_supply_set_property(mdwc->usb_psy,
3142 POWER_SUPPLY_PROP_CURRENT_MAX, &pval))
3143 goto psy_error;
3144 } else if (mdwc->max_power > 0 && (mA == 0 || mA == 2)) {
3145 /* Disable charging */
3146 pval.intval = false;
3147 if (power_supply_set_property(mdwc->usb_psy,
3148 POWER_SUPPLY_PROP_ONLINE, &pval))
3149 goto psy_error;
3150 } else {
3151 /* Enable charging */
3152 pval.intval = true;
3153 if (power_supply_set_property(mdwc->usb_psy,
3154 POWER_SUPPLY_PROP_ONLINE, &pval))
3155 goto psy_error;
3156 }
3157
3158 /* Set max current limit in uA */
3159 pval.intval = 1000 * mA;
3160 if (power_supply_set_property(mdwc->usb_psy,
3161 POWER_SUPPLY_PROP_CURRENT_MAX, &pval))
3162 goto psy_error;
3163
3164 mdwc->max_power = mA;
3165 return 0;
3166
3167psy_error:
3168 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3169 return -ENXIO;
3170}
3171
3172
3173/**
3174 * dwc3_otg_sm_work - workqueue function.
3175 *
3176 * @w: Pointer to the dwc3 otg workqueue
3177 *
3178 * NOTE: After any change in otg_state, we must reschdule the state machine.
3179 */
3180static void dwc3_otg_sm_work(struct work_struct *w)
3181{
3182 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3183 struct dwc3 *dwc = NULL;
3184 bool work = 0;
3185 int ret = 0;
3186 unsigned long delay = 0;
3187 const char *state;
3188
3189 if (mdwc->dwc3)
3190 dwc = platform_get_drvdata(mdwc->dwc3);
3191
3192 if (!dwc) {
3193 dev_err(mdwc->dev, "dwc is NULL.\n");
3194 return;
3195 }
3196
3197 state = usb_otg_state_string(mdwc->otg_state);
3198 dev_dbg(mdwc->dev, "%s state\n", state);
3199
3200 /* Check OTG state */
3201 switch (mdwc->otg_state) {
3202 case OTG_STATE_UNDEFINED:
3203 /* Do nothing if no cable connected */
3204 if (test_bit(ID, &mdwc->inputs) &&
3205 !test_bit(B_SESS_VLD, &mdwc->inputs))
3206 break;
3207
3208 mdwc->otg_state = OTG_STATE_B_IDLE;
3209 /* fall-through */
3210 case OTG_STATE_B_IDLE:
3211 if (!test_bit(ID, &mdwc->inputs)) {
3212 dev_dbg(mdwc->dev, "!id\n");
3213 mdwc->otg_state = OTG_STATE_A_IDLE;
3214 work = 1;
3215 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
3216 dev_dbg(mdwc->dev, "b_sess_vld\n");
3217 /*
3218 * Increment pm usage count upon cable connect. Count
3219 * is decremented in OTG_STATE_B_PERIPHERAL state on
3220 * cable disconnect or in bus suspend.
3221 */
3222 pm_runtime_get_sync(mdwc->dev);
3223 dwc3_otg_start_peripheral(mdwc, 1);
3224 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3225 work = 1;
3226 } else {
3227 dwc3_msm_gadget_vbus_draw(mdwc, 0);
3228 dev_dbg(mdwc->dev, "Cable disconnected\n");
3229 }
3230 break;
3231
3232 case OTG_STATE_B_PERIPHERAL:
3233 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
3234 !test_bit(ID, &mdwc->inputs)) {
3235 dev_dbg(mdwc->dev, "!id || !bsv\n");
3236 mdwc->otg_state = OTG_STATE_B_IDLE;
3237 dwc3_otg_start_peripheral(mdwc, 0);
3238 /*
3239 * Decrement pm usage count upon cable disconnect
3240 * which was incremented upon cable connect in
3241 * OTG_STATE_B_IDLE state
3242 */
3243 pm_runtime_put_sync(mdwc->dev);
3244 work = 1;
3245 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
3246 test_bit(B_SESS_VLD, &mdwc->inputs)) {
3247 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
3248 mdwc->otg_state = OTG_STATE_B_SUSPEND;
3249 /*
3250 * Decrement pm usage count upon bus suspend.
3251 * Count was incremented either upon cable
3252 * connect in OTG_STATE_B_IDLE or host
3253 * initiated resume after bus suspend in
3254 * OTG_STATE_B_SUSPEND state
3255 */
3256 pm_runtime_mark_last_busy(mdwc->dev);
3257 pm_runtime_put_autosuspend(mdwc->dev);
3258 }
3259 break;
3260
3261 case OTG_STATE_B_SUSPEND:
3262 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
3263 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
3264 mdwc->otg_state = OTG_STATE_B_IDLE;
3265 dwc3_otg_start_peripheral(mdwc, 0);
3266 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
3267 dev_dbg(mdwc->dev, "BSUSP !susp\n");
3268 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3269 /*
3270 * Increment pm usage count upon host
3271 * initiated resume. Count was decremented
3272 * upon bus suspend in
3273 * OTG_STATE_B_PERIPHERAL state.
3274 */
3275 pm_runtime_get_sync(mdwc->dev);
3276 }
3277 break;
3278
3279 case OTG_STATE_A_IDLE:
3280 /* Switch to A-Device*/
3281 if (test_bit(ID, &mdwc->inputs)) {
3282 dev_dbg(mdwc->dev, "id\n");
3283 mdwc->otg_state = OTG_STATE_B_IDLE;
3284 mdwc->vbus_retry_count = 0;
3285 work = 1;
3286 } else {
3287 mdwc->otg_state = OTG_STATE_A_HOST;
3288 ret = dwc3_otg_start_host(mdwc, 1);
3289 if ((ret == -EPROBE_DEFER) &&
3290 mdwc->vbus_retry_count < 3) {
3291 /*
3292 * Getting the regulator failed because the regulator
3293 * driver is not up yet. Retry starting the host after 1 sec.
3294 */
3295 mdwc->otg_state = OTG_STATE_A_IDLE;
3296 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
3297 delay = VBUS_REG_CHECK_DELAY;
3298 work = 1;
3299 mdwc->vbus_retry_count++;
3300 } else if (ret) {
3301 dev_err(mdwc->dev, "unable to start host\n");
3302 mdwc->otg_state = OTG_STATE_A_IDLE;
3303 goto ret;
3304 }
3305 }
3306 break;
3307
3308 case OTG_STATE_A_HOST:
3309 if (test_bit(ID, &mdwc->inputs)) {
3310 dev_dbg(mdwc->dev, "id\n");
3311 dwc3_otg_start_host(mdwc, 0);
3312 mdwc->otg_state = OTG_STATE_B_IDLE;
3313 mdwc->vbus_retry_count = 0;
3314 work = 1;
3315 } else {
3316 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
3317 if (dwc)
3318 pm_runtime_resume(&dwc->xhci->dev);
3319 }
3320 break;
3321
3322 default:
3323 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
3324
3325 }
3326
3327 if (work)
3328 schedule_delayed_work(&mdwc->sm_work, delay);
3329
3330ret:
3331 return;
3332}
3333
3334#ifdef CONFIG_PM_SLEEP
3335static int dwc3_msm_pm_suspend(struct device *dev)
3336{
3337 int ret = 0;
3338 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3339 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3340
3341 dev_dbg(dev, "dwc3-msm PM suspend\n");
3342
3343 flush_workqueue(mdwc->dwc3_wq);
3344 if (!atomic_read(&dwc->in_lpm)) {
3345 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
3346 return -EBUSY;
3347 }
3348
3349 ret = dwc3_msm_suspend(mdwc);
3350 if (!ret)
3351 atomic_set(&mdwc->pm_suspended, 1);
3352
3353 return ret;
3354}
3355
3356static int dwc3_msm_pm_resume(struct device *dev)
3357{
3358 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3359
3360 dev_dbg(dev, "dwc3-msm PM resume\n");
3361
3362 /* flush to avoid race in read/write of pm_suspended */
3363 flush_workqueue(mdwc->dwc3_wq);
3364 atomic_set(&mdwc->pm_suspended, 0);
3365
3366 /* kick in otg state machine */
3367 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
3368
3369 return 0;
3370}
3371#endif
3372
3373#ifdef CONFIG_PM
3374static int dwc3_msm_runtime_idle(struct device *dev)
3375{
3376 dev_dbg(dev, "DWC3-msm runtime idle\n");
3377
3378 return 0;
3379}
3380
3381static int dwc3_msm_runtime_suspend(struct device *dev)
3382{
3383 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3384
3385 dev_dbg(dev, "DWC3-msm runtime suspend\n");
3386
3387 return dwc3_msm_suspend(mdwc);
3388}
3389
3390static int dwc3_msm_runtime_resume(struct device *dev)
3391{
3392 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3393
3394 dev_dbg(dev, "DWC3-msm runtime resume\n");
3395
3396 return dwc3_msm_resume(mdwc);
3397}
3398#endif
3399
3400static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
3401 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
3402 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
3403 dwc3_msm_runtime_idle)
3404};
3405
3406static const struct of_device_id of_dwc3_match[] = {
3407 {
3408 .compatible = "qcom,dwc-usb3-msm",
3409 },
3410 { },
3411};
3412MODULE_DEVICE_TABLE(of, of_dwc3_match);
3413
3414static struct platform_driver dwc3_msm_driver = {
3415 .probe = dwc3_msm_probe,
3416 .remove = dwc3_msm_remove,
3417 .driver = {
3418 .name = "msm-dwc3",
3419 .pm = &dwc3_msm_dev_pm_ops,
3420 .of_match_table = of_dwc3_match,
3421 },
3422};
3423
3424MODULE_LICENSE("GPL v2");
3425MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
3426
3427static int dwc3_msm_init(void)
3428{
3429 return platform_driver_register(&dwc3_msm_driver);
3430}
3431module_init(dwc3_msm_init);
3432
3433static void __exit dwc3_msm_exit(void)
3434{
3435 platform_driver_unregister(&dwc3_msm_driver);
3436}
3437module_exit(dwc3_msm_exit);