blob: 1dd3f88282a54eb7a04f8b8174a2e0b383b00895 [file] [log] [blame]
Mayank Rana511f3b22016-08-02 12:00:11 -07001/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/delay.h>
30#include <linux/of.h>
31#include <linux/of_platform.h>
32#include <linux/of_gpio.h>
33#include <linux/list.h>
34#include <linux/uaccess.h>
35#include <linux/usb/ch9.h>
36#include <linux/usb/gadget.h>
37#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070038#include <linux/regulator/consumer.h>
39#include <linux/pm_wakeup.h>
40#include <linux/power_supply.h>
41#include <linux/cdev.h>
42#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070043#include <linux/msm-bus.h>
44#include <linux/irq.h>
45#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053046#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070047#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070048
49#include "power.h"
50#include "core.h"
51#include "gadget.h"
52#include "dbm.h"
53#include "debug.h"
54#include "xhci.h"
55
56/* time out to wait for USB cable status notification (in ms)*/
57#define SM_INIT_TIMEOUT 30000
58
59/* AHB2PHY register offsets */
60#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
61
62/* AHB2PHY read/write waite value */
63#define ONE_READ_WRITE_WAIT 0x11
64
65/* cpu to fix usb interrupt */
66static int cpu_to_affin;
67module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
68MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
69
70/* XHCI registers */
71#define USB3_HCSPARAMS1 (0x4)
72#define USB3_PORTSC (0x420)
73
74/**
75 * USB QSCRATCH Hardware registers
76 *
77 */
78#define QSCRATCH_REG_OFFSET (0x000F8800)
79#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
80#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
81#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
82#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
83
84#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
85#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
86#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
87#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
88#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
89
90/* QSCRATCH_GENERAL_CFG register bit offset */
91#define PIPE_UTMI_CLK_SEL BIT(0)
92#define PIPE3_PHYSTATUS_SW BIT(3)
93#define PIPE_UTMI_CLK_DIS BIT(8)
94
95#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
96#define UTMI_OTG_VBUS_VALID BIT(20)
97#define SW_SESSVLD_SEL BIT(28)
98
99#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
100#define LANE0_PWR_PRESENT BIT(24)
101
102/* GSI related registers */
103#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
104#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
105
106#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
107#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
108#define GSI_CLK_EN_MASK BIT(12)
109#define BLOCK_GSI_WR_GO_MASK BIT(1)
110#define GSI_EN_MASK BIT(0)
111
112#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
113#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
114#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
115#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
116
117#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
118#define GSI_WR_CTRL_STATE_MASK BIT(15)
119
Mayank Ranaf4918d32016-12-15 13:35:55 -0800120#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
121#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
122#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
123#define DWC3_GEVENT_TYPE_GSI 0x3
124
/*
 * Bookkeeping entry used to restore a usb_request's original completion
 * callback after this glue layer has intercepted it (see
 * dwc3_msm_req_complete_func()).  Entries live on mdwc->req_complete_list.
 */
struct dwc3_msm_req_complete {
	struct list_head list_item;	/* link in mdwc->req_complete_list */
	struct usb_request *req;	/* the request being tracked */
	void (*orig_complete)(struct usb_ep *ep,
		struct usb_request *req);	/* saved original callback */
};
131
/* State of the USB ID pin: grounded selects host role, floating peripheral */
enum dwc3_id_state {
	DWC3_ID_GROUND = 0,
	DWC3_ID_FLOAT,
};

/* for type c cable: which CC line the plug is oriented to, if any */
enum plug_orientation {
	ORIENTATION_NONE,
	ORIENTATION_CC1,
	ORIENTATION_CC2,
};
143
144/* Input bits to state machine (mdwc->inputs) */
145
146#define ID 0
147#define B_SESS_VLD 1
148#define B_SUSPEND 2
149
/*
 * Per-controller state for the Qualcomm MSM glue around the DWC3 core:
 * clocks/resets, regulators, PHY handles, IRQs, bus-bandwidth voting and
 * the inputs of the dual-role state machine (sm_work).
 */
struct dwc3_msm {
	struct device *dev;
	void __iomem *base;		/* DWC3 core register space */
	void __iomem *ahb2phy_base;	/* AHB2PHY bridge registers */
	struct platform_device	*dwc3;	/* child DWC3 core device */
	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
	struct list_head req_complete_list;	/* saved completion callbacks */

	/* clocks, reset and power rails for the controller */
	struct clk		*xo_clk;
	struct clk		*core_clk;
	long			core_clk_rate;
	struct clk		*iface_clk;
	struct clk		*sleep_clk;
	struct clk		*utmi_clk;
	unsigned int		utmi_clk_rate;
	struct clk		*utmi_clk_src;
	struct clk		*bus_aggr_clk;
	struct clk		*noc_aggr_clk;
	struct clk		*cfg_ahb_clk;
	struct reset_control	*core_reset;
	struct regulator	*dwc3_gdsc;

	struct usb_phy		*hs_phy, *ss_phy;

	struct dbm		*dbm;

	/* VBUS regulator for host mode */
	struct regulator	*vbus_reg;
	int			vbus_retry_count;
	bool			resume_pending;
	atomic_t                pm_suspended;
	int			hs_phy_irq;
	int			ss_phy_irq;
	struct work_struct	resume_work;
	struct work_struct	restart_usb_work;
	bool			in_restart;
	struct workqueue_struct *dwc3_wq;
	struct delayed_work	sm_work;	/* dual-role state machine */
	unsigned long		inputs;		/* ID/B_SESS_VLD/B_SUSPEND bits */
	unsigned int		max_power;
	bool			charging_disabled;
	enum usb_otg_state	otg_state;

	/* msm-bus bandwidth voting */
	struct work_struct	bus_vote_w;
	unsigned int		bus_vote;
	u32			bus_perf_client;
	struct msm_bus_scale_pdata	*bus_scale_table;
	struct power_supply	*usb_psy;
	struct work_struct	vbus_draw_work;
	bool			in_host_mode;
	unsigned int		tx_fifo_size;
	bool			vbus_active;
	bool			suspend;
	bool			disable_host_mode_pm;
	enum dwc3_id_state	id_state;
	unsigned long		lpm_flags;	/* which low-power steps were taken */
#define MDWC3_SS_PHY_SUSPEND		BIT(0)
#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY	BIT(1)
#define MDWC3_POWER_COLLAPSE		BIT(2)

	unsigned int		irq_to_affin;	/* see cpu_to_affin module param */
	struct notifier_block	dwc3_cpu_notifier;

	/* extcon notifiers feeding VBUS/ID changes into the state machine */
	struct extcon_dev	*extcon_vbus;
	struct extcon_dev	*extcon_id;
	struct notifier_block	vbus_nb;
	struct notifier_block	id_nb;

	int			pwr_event_irq;
	atomic_t                in_p3;		/* SS PHY in P3 power state */
	unsigned int		lpm_to_suspend_delay;
	bool			init;
	enum plug_orientation	typec_orientation;
	u32			num_gsi_event_buffers;
	struct dwc3_event_buffer **gsi_ev_buff;
};
224
225#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
226#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
227#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
228
229#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
230#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
231#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
232
233#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
234#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
235#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
236
237#define DSTS_CONNECTSPD_SS 0x4
238
239
240static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
241static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800242static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Rana511f3b22016-08-02 12:00:11 -0700243/**
244 *
245 * Read register with debug info.
246 *
247 * @base - DWC3 base virtual address.
248 * @offset - register offset.
249 *
250 * @return u32
251 */
252static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
253{
254 u32 val = ioread32(base + offset);
255 return val;
256}
257
258/**
259 * Read register masked field with debug info.
260 *
261 * @base - DWC3 base virtual address.
262 * @offset - register offset.
263 * @mask - register bitmask.
264 *
265 * @return u32
266 */
267static inline u32 dwc3_msm_read_reg_field(void *base,
268 u32 offset,
269 const u32 mask)
270{
271 u32 shift = find_first_bit((void *)&mask, 32);
272 u32 val = ioread32(base + offset);
273
274 val &= mask; /* clear other bits */
275 val >>= shift;
276 return val;
277}
278
/**
 *
 * Write a 32-bit controller register.
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 * @val - value to write.
 *
 */
static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}
292
293/**
294 * Write register masked field with debug info.
295 *
296 * @base - DWC3 base virtual address.
297 * @offset - register offset.
298 * @mask - register bitmask.
299 * @val - value to write.
300 *
301 */
302static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
303 const u32 mask, u32 val)
304{
305 u32 shift = find_first_bit((void *)&mask, 32);
306 u32 tmp = ioread32(base + offset);
307
308 tmp &= ~mask; /* clear written bits */
309 val = tmp | (val << shift);
310 iowrite32(val, base + offset);
311}
312
313/**
314 * Write register and read back masked value to confirm it is written
315 *
316 * @base - DWC3 base virtual address.
317 * @offset - register offset.
318 * @mask - register bitmask specifying what should be updated
319 * @val - value to write.
320 *
321 */
322static inline void dwc3_msm_write_readback(void *base, u32 offset,
323 const u32 mask, u32 val)
324{
325 u32 write_val, tmp = ioread32(base + offset);
326
327 tmp &= ~mask; /* retain other bits */
328 write_val = tmp | val;
329
330 iowrite32(write_val, base + offset);
331
332 /* Read back to see if val was written */
333 tmp = ioread32(base + offset);
334 tmp &= mask; /* clear other bits */
335
336 if (tmp != val)
337 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
338 __func__, val, offset);
339}
340
341static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
342{
343 int i, num_ports;
344 u32 reg;
345
346 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
347 num_ports = HCS_MAX_PORTS(reg);
348
349 for (i = 0; i < num_ports; i++) {
350 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
351 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
352 return true;
353 }
354
355 return false;
356}
357
358static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
359{
360 u8 speed;
361
362 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
363 return !!(speed & DSTS_CONNECTSPD_SS);
364}
365
366static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
367{
368 if (mdwc->in_host_mode)
369 return dwc3_msm_is_host_superspeed(mdwc);
370
371 return dwc3_msm_is_dev_superspeed(mdwc);
372}
373
374#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
375/**
376 * Configure the DBM with the BAM's data fifo.
377 * This function is called by the USB BAM Driver
378 * upon initialization.
379 *
380 * @ep - pointer to usb endpoint.
381 * @addr - address of data fifo.
382 * @size - size of data fifo.
383 *
384 */
385int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
386 u32 size, u8 dst_pipe_idx)
387{
388 struct dwc3_ep *dep = to_dwc3_ep(ep);
389 struct dwc3 *dwc = dep->dwc;
390 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
391
392 dev_dbg(mdwc->dev, "%s\n", __func__);
393
394 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
395 dst_pipe_idx);
396}
397
398
399/**
400* Cleanups for msm endpoint on request complete.
401*
402* Also call original request complete.
403*
404* @usb_ep - pointer to usb_ep instance.
405* @request - pointer to usb_request instance.
406*
407* @return int - 0 on success, negative on error.
408*/
409static void dwc3_msm_req_complete_func(struct usb_ep *ep,
410 struct usb_request *request)
411{
412 struct dwc3_ep *dep = to_dwc3_ep(ep);
413 struct dwc3 *dwc = dep->dwc;
414 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
415 struct dwc3_msm_req_complete *req_complete = NULL;
416
417 /* Find original request complete function and remove it from list */
418 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
419 if (req_complete->req == request)
420 break;
421 }
422 if (!req_complete || req_complete->req != request) {
423 dev_err(dep->dwc->dev, "%s: could not find the request\n",
424 __func__);
425 return;
426 }
427 list_del(&req_complete->list_item);
428
429 /*
430 * Release another one TRB to the pool since DBM queue took 2 TRBs
431 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
432 * released only one.
433 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700434 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700435
436 /* Unconfigure dbm ep */
437 dbm_ep_unconfig(mdwc->dbm, dep->number);
438
439 /*
440 * If this is the last endpoint we unconfigured, than reset also
441 * the event buffers; unless unconfiguring the ep due to lpm,
442 * in which case the event buffer only gets reset during the
443 * block reset.
444 */
445 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
446 !dbm_reset_ep_after_lpm(mdwc->dbm))
447 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
448
449 /*
450 * Call original complete function, notice that dwc->lock is already
451 * taken by the caller of this function (dwc3_gadget_giveback()).
452 */
453 request->complete = req_complete->orig_complete;
454 if (request->complete)
455 request->complete(ep, request);
456
457 kfree(req_complete);
458}
459
460
461/**
462* Helper function
463*
464* Reset DBM endpoint.
465*
466* @mdwc - pointer to dwc3_msm instance.
467* @dep - pointer to dwc3_ep instance.
468*
469* @return int - 0 on success, negative on error.
470*/
471static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
472{
473 int ret;
474
475 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
476
477 /* Reset the dbm endpoint */
478 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
479 if (ret) {
480 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
481 __func__);
482 return ret;
483 }
484
485 /*
486 * The necessary delay between asserting and deasserting the dbm ep
487 * reset is based on the number of active endpoints. If there is more
488 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
489 * delay will suffice.
490 */
491 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
492 usleep_range(1000, 1200);
493 else
494 udelay(10);
495 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
496 if (ret) {
497 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
498 __func__);
499 return ret;
500 }
501
502 return 0;
503}
504
/**
 * Reset the DBM endpoint which is linked to the given USB endpoint.
 *
 * @usb_ep - pointer to usb_ep instance.
 *
 * @return int - 0 on success, negative on error.
 */

int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* thin exported wrapper around the internal reset helper */
	return __dwc3_msm_dbm_ep_reset(mdwc, dep);
}
EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
522
523
524/**
525* Helper function.
526* See the header of the dwc3_msm_ep_queue function.
527*
528* @dwc3_ep - pointer to dwc3_ep instance.
529* @req - pointer to dwc3_request instance.
530*
531* @return int - 0 on success, negative on error.
532*/
533static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
534{
535 struct dwc3_trb *trb;
536 struct dwc3_trb *trb_link;
537 struct dwc3_gadget_ep_cmd_params params;
538 u32 cmd;
539 int ret = 0;
540
Mayank Rana83ad5822016-08-09 14:17:22 -0700541 /* We push the request to the dep->started_list list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700542 * this request is issued with start transfer. The request will be out
543 * from this list in 2 cases. The first is that the transfer will be
544 * completed (not if the transfer is endless using a circular TRBs with
545 * with link TRB). The second case is an option to do stop stransfer,
546 * this can be initiated by the function driver when calling dequeue.
547 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700548 req->started = true;
549 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700550
551 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana83ad5822016-08-09 14:17:22 -0700552 trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
553 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700554 memset(trb, 0, sizeof(*trb));
555
556 req->trb = trb;
557 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
558 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
559 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
560 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
561 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
562
563 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana83ad5822016-08-09 14:17:22 -0700564 trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
565 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700566 memset(trb_link, 0, sizeof(*trb_link));
567
568 trb_link->bpl = lower_32_bits(req->trb_dma);
569 trb_link->bph = DBM_TRB_BIT |
570 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
571 trb_link->size = 0;
572 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
573
574 /*
575 * Now start the transfer
576 */
577 memset(&params, 0, sizeof(params));
578 params.param0 = 0; /* TDAddr High */
579 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
580
581 /* DBM requires IOC to be set */
582 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700583 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700584 if (ret < 0) {
585 dev_dbg(dep->dwc->dev,
586 "%s: failed to send STARTTRANSFER command\n",
587 __func__);
588
589 list_del(&req->list);
590 return ret;
591 }
592 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700593 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700594
595 return ret;
596}
597
598/**
599* Queue a usb request to the DBM endpoint.
600* This function should be called after the endpoint
601* was enabled by the ep_enable.
602*
603* This function prepares special structure of TRBs which
604* is familiar with the DBM HW, so it will possible to use
605* this endpoint in DBM mode.
606*
607* The TRBs prepared by this function, is one normal TRB
608* which point to a fake buffer, followed by a link TRB
609* that points to the first TRB.
610*
611* The API of this function follow the regular API of
612* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
613*
614* @usb_ep - pointer to usb_ep instance.
615* @request - pointer to usb_request instance.
616* @gfp_flags - possible flags.
617*
618* @return int - 0 on success, negative on error.
619*/
620static int dwc3_msm_ep_queue(struct usb_ep *ep,
621 struct usb_request *request, gfp_t gfp_flags)
622{
623 struct dwc3_request *req = to_dwc3_request(request);
624 struct dwc3_ep *dep = to_dwc3_ep(ep);
625 struct dwc3 *dwc = dep->dwc;
626 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
627 struct dwc3_msm_req_complete *req_complete;
628 unsigned long flags;
629 int ret = 0, size;
630 u8 bam_pipe;
631 bool producer;
632 bool disable_wb;
633 bool internal_mem;
634 bool ioc;
635 bool superspeed;
636
637 if (!(request->udc_priv & MSM_SPS_MODE)) {
638 /* Not SPS mode, call original queue */
639 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
640 __func__);
641
642 return (mdwc->original_ep_ops[dep->number])->queue(ep,
643 request,
644 gfp_flags);
645 }
646
647 /* HW restriction regarding TRB size (8KB) */
648 if (req->request.length < 0x2000) {
649 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
650 return -EINVAL;
651 }
652
653 /*
654 * Override req->complete function, but before doing that,
655 * store it's original pointer in the req_complete_list.
656 */
657 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
658 if (!req_complete)
659 return -ENOMEM;
660
661 req_complete->req = request;
662 req_complete->orig_complete = request->complete;
663 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
664 request->complete = dwc3_msm_req_complete_func;
665
666 /*
667 * Configure the DBM endpoint
668 */
669 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
670 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
671 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
672 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
673 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
674
675 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
676 disable_wb, internal_mem, ioc);
677 if (ret < 0) {
678 dev_err(mdwc->dev,
679 "error %d after calling dbm_ep_config\n", ret);
680 return ret;
681 }
682
683 dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
684 __func__, request, ep->name, request->length);
685 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
686 dbm_event_buffer_config(mdwc->dbm,
687 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
688 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
689 DWC3_GEVNTSIZ_SIZE(size));
690
691 /*
692 * We must obtain the lock of the dwc3 core driver,
693 * including disabling interrupts, so we will be sure
694 * that we are the only ones that configure the HW device
695 * core and ensure that we queuing the request will finish
696 * as soon as possible so we will release back the lock.
697 */
698 spin_lock_irqsave(&dwc->lock, flags);
699 if (!dep->endpoint.desc) {
700 dev_err(mdwc->dev,
701 "%s: trying to queue request %p to disabled ep %s\n",
702 __func__, request, ep->name);
703 ret = -EPERM;
704 goto err;
705 }
706
707 if (dep->number == 0 || dep->number == 1) {
708 dev_err(mdwc->dev,
709 "%s: trying to queue dbm request %p to control ep %s\n",
710 __func__, request, ep->name);
711 ret = -EPERM;
712 goto err;
713 }
714
715
Mayank Rana83ad5822016-08-09 14:17:22 -0700716 if (dep->trb_dequeue != dep->trb_enqueue ||
717 !list_empty(&dep->pending_list)
718 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700719 dev_err(mdwc->dev,
720 "%s: trying to queue dbm request %p tp ep %s\n",
721 __func__, request, ep->name);
722 ret = -EPERM;
723 goto err;
724 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700725 dep->trb_dequeue = 0;
726 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700727 }
728
729 ret = __dwc3_msm_ep_queue(dep, req);
730 if (ret < 0) {
731 dev_err(mdwc->dev,
732 "error %d after calling __dwc3_msm_ep_queue\n", ret);
733 goto err;
734 }
735
736 spin_unlock_irqrestore(&dwc->lock, flags);
737 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
738 dbm_set_speed(mdwc->dbm, (u8)superspeed);
739
740 return 0;
741
742err:
743 spin_unlock_irqrestore(&dwc->lock, flags);
744 kfree(req_complete);
745 return ret;
746}
747
/*
 * Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
 *
 * @usb_ep - pointer to usb_ep instance.
 *
 * @return int - XferRscIndex
 */
static inline int gsi_get_xfer_index(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	/* resource_index was cached when StartXfer was issued for this EP */
	return dep->resource_index;
}
761
762/*
763* Fills up the GSI channel information needed in call to IPA driver
764* for GSI channel creation.
765*
766* @usb_ep - pointer to usb_ep instance.
767* @ch_info - output parameter with requested channel info
768*/
769static void gsi_get_channel_info(struct usb_ep *ep,
770 struct gsi_channel_info *ch_info)
771{
772 struct dwc3_ep *dep = to_dwc3_ep(ep);
773 int last_trb_index = 0;
774 struct dwc3 *dwc = dep->dwc;
775 struct usb_gsi_request *request = ch_info->ch_req;
776
777 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
778 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Rana83ad5822016-08-09 14:17:22 -0700779 DWC3_DEPCMD);
Mayank Rana511f3b22016-08-02 12:00:11 -0700780 ch_info->depcmd_hi_addr = 0;
781
782 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
783 &dep->trb_pool[0]);
784 /* Convert to multipled of 1KB */
785 ch_info->const_buffer_size = request->buf_len/1024;
786
787 /* IN direction */
788 if (dep->direction) {
789 /*
790 * Multiply by size of each TRB for xfer_ring_len in bytes.
791 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
792 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
793 */
794 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
795 last_trb_index = 2 * request->num_bufs + 2;
796 } else { /* OUT direction */
797 /*
798 * Multiply by size of each TRB for xfer_ring_len in bytes.
799 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
800 * LINK TRB.
801 */
802 ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
803 last_trb_index = request->num_bufs + 1;
804 }
805
806 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
807 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
808 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
809 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
810 DWC3_GEVNTCOUNT(ep->ep_intr_num));
811 ch_info->gevntcount_hi_addr = 0;
812
813 dev_dbg(dwc->dev,
814 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
815 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
816 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
817}
818
819/*
820* Perform StartXfer on GSI EP. Stores XferRscIndex.
821*
822* @usb_ep - pointer to usb_ep instance.
823*
824* @return int - 0 on success
825*/
826static int gsi_startxfer_for_ep(struct usb_ep *ep)
827{
828 int ret;
829 struct dwc3_gadget_ep_cmd_params params;
830 u32 cmd;
831 struct dwc3_ep *dep = to_dwc3_ep(ep);
832 struct dwc3 *dwc = dep->dwc;
833
834 memset(&params, 0, sizeof(params));
835 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
836 params.param0 |= (ep->ep_intr_num << 16);
837 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
838 &dep->trb_pool[0]));
839 cmd = DWC3_DEPCMD_STARTTRANSFER;
840 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700841 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700842
843 if (ret < 0)
844 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700845 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700846 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
847 return ret;
848}
849
850/*
851* Store Ring Base and Doorbell Address for GSI EP
852* for GSI channel creation.
853*
854* @usb_ep - pointer to usb_ep instance.
855* @dbl_addr - Doorbell address obtained from IPA driver
856*/
857static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
858{
859 struct dwc3_ep *dep = to_dwc3_ep(ep);
860 struct dwc3 *dwc = dep->dwc;
861 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
862 int n = ep->ep_intr_num - 1;
863
864 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
865 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
866 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
867
868 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
869 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
870 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
871 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
872}
873
874/*
875* Rings Doorbell for IN GSI Channel
876*
877* @usb_ep - pointer to usb_ep instance.
878* @request - pointer to GSI request. This is used to pass in the
879* address of the GSI doorbell obtained from IPA driver
880*/
881static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
882{
883 void __iomem *gsi_dbl_address_lsb;
884 void __iomem *gsi_dbl_address_msb;
885 dma_addr_t offset;
886 u64 dbl_addr = *((u64 *)request->buf_base_addr);
887 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
888 u32 dbl_hi_addr = (dbl_addr >> 32);
889 u32 num_trbs = (request->num_bufs * 2 + 2);
890 struct dwc3_ep *dep = to_dwc3_ep(ep);
891 struct dwc3 *dwc = dep->dwc;
892 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
893
894 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
895 dbl_lo_addr, sizeof(u32));
896 if (!gsi_dbl_address_lsb)
897 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
898
899 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
900 dbl_hi_addr, sizeof(u32));
901 if (!gsi_dbl_address_msb)
902 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
903
904 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
905 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
906 &offset, gsi_dbl_address_lsb, dbl_lo_addr);
907
908 writel_relaxed(offset, gsi_dbl_address_lsb);
909 writel_relaxed(0, gsi_dbl_address_msb);
910}
911
912/*
913* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
914*
915* @usb_ep - pointer to usb_ep instance.
916* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
917*
918* @return int - 0 on success
919*/
920static int gsi_updatexfer_for_ep(struct usb_ep *ep,
921 struct usb_gsi_request *request)
922{
923 int i;
924 int ret;
925 u32 cmd;
926 int num_trbs = request->num_bufs + 1;
927 struct dwc3_trb *trb;
928 struct dwc3_gadget_ep_cmd_params params;
929 struct dwc3_ep *dep = to_dwc3_ep(ep);
930 struct dwc3 *dwc = dep->dwc;
931
932 for (i = 0; i < num_trbs - 1; i++) {
933 trb = &dep->trb_pool[i];
934 trb->ctrl |= DWC3_TRB_CTRL_HWO;
935 }
936
937 memset(&params, 0, sizeof(params));
938 cmd = DWC3_DEPCMD_UPDATETRANSFER;
939 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -0700940 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700941 dep->flags |= DWC3_EP_BUSY;
942 if (ret < 0)
943 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
944 return ret;
945}
946
/*
 * Perform EndXfer on particular GSI EP.
 *
 * @usb_ep - pointer to usb_ep instance.
 */
static void gsi_endxfer_for_ep(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	/* force-stop whatever transfer is active on this endpoint */
	dwc3_stop_active_transfer(dwc, dep->number, true);
}
959
960/*
961* Allocates and configures TRBs for GSI EPs.
962*
963* @usb_ep - pointer to usb_ep instance.
964* @request - pointer to GSI request.
965*
966* @return int - 0 on success
967*/
968static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
969{
970 int i = 0;
971 dma_addr_t buffer_addr = req->dma;
972 struct dwc3_ep *dep = to_dwc3_ep(ep);
973 struct dwc3 *dwc = dep->dwc;
974 struct dwc3_trb *trb;
975 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
976 : (req->num_bufs + 1);
977
978 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
979 num_trbs * sizeof(struct dwc3_trb),
980 num_trbs * sizeof(struct dwc3_trb), 0);
981 if (!dep->trb_dma_pool) {
982 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
983 dep->name);
984 return -ENOMEM;
985 }
986
987 dep->num_trbs = num_trbs;
988
989 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
990 GFP_KERNEL, &dep->trb_pool_dma);
991 if (!dep->trb_pool) {
992 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
993 dep->name);
994 return -ENOMEM;
995 }
996
997 /* IN direction */
998 if (dep->direction) {
999 for (i = 0; i < num_trbs ; i++) {
1000 trb = &dep->trb_pool[i];
1001 memset(trb, 0, sizeof(*trb));
1002 /* Set up first n+1 TRBs for ZLPs */
1003 if (i < (req->num_bufs + 1)) {
1004 trb->bpl = 0;
1005 trb->bph = 0;
1006 trb->size = 0;
1007 trb->ctrl = DWC3_TRBCTL_NORMAL
1008 | DWC3_TRB_CTRL_IOC;
1009 continue;
1010 }
1011
1012 /* Setup n TRBs pointing to valid buffers */
1013 trb->bpl = lower_32_bits(buffer_addr);
1014 trb->bph = 0;
1015 trb->size = 0;
1016 trb->ctrl = DWC3_TRBCTL_NORMAL
1017 | DWC3_TRB_CTRL_IOC;
1018 buffer_addr += req->buf_len;
1019
1020 /* Set up the Link TRB at the end */
1021 if (i == (num_trbs - 1)) {
1022 trb->bpl = dwc3_trb_dma_offset(dep,
1023 &dep->trb_pool[0]);
1024 trb->bph = (1 << 23) | (1 << 21)
1025 | (ep->ep_intr_num << 16);
1026 trb->size = 0;
1027 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1028 | DWC3_TRB_CTRL_HWO;
1029 }
1030 }
1031 } else { /* OUT direction */
1032
1033 for (i = 0; i < num_trbs ; i++) {
1034
1035 trb = &dep->trb_pool[i];
1036 memset(trb, 0, sizeof(*trb));
1037 trb->bpl = lower_32_bits(buffer_addr);
1038 trb->bph = 0;
1039 trb->size = req->buf_len;
1040 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
1041 | DWC3_TRB_CTRL_CSP
1042 | DWC3_TRB_CTRL_ISP_IMI;
1043 buffer_addr += req->buf_len;
1044
1045 /* Set up the Link TRB at the end */
1046 if (i == (num_trbs - 1)) {
1047 trb->bpl = dwc3_trb_dma_offset(dep,
1048 &dep->trb_pool[0]);
1049 trb->bph = (1 << 23) | (1 << 21)
1050 | (ep->ep_intr_num << 16);
1051 trb->size = 0;
1052 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1053 | DWC3_TRB_CTRL_HWO;
1054 }
1055 }
1056 }
1057 return 0;
1058}
1059
1060/*
1061* Frees TRBs for GSI EPs.
1062*
1063* @usb_ep - pointer to usb_ep instance.
1064*
1065*/
1066static void gsi_free_trbs(struct usb_ep *ep)
1067{
1068 struct dwc3_ep *dep = to_dwc3_ep(ep);
1069
1070 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1071 return;
1072
1073 /* Free TRBs and TRB pool for EP */
1074 if (dep->trb_dma_pool) {
1075 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1076 dep->trb_pool_dma);
1077 dma_pool_destroy(dep->trb_dma_pool);
1078 dep->trb_pool = NULL;
1079 dep->trb_pool_dma = 0;
1080 dep->trb_dma_pool = NULL;
1081 }
1082}
1083/*
1084* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1085*
1086* @usb_ep - pointer to usb_ep instance.
1087* @request - pointer to GSI request.
1088*/
static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_gadget_ep_cmd_params params;
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	u32 reg;

	memset(&params, 0x00, sizeof(params));

	/* Configure GSI EP */
	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
					| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	/* Set EP number */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/* Set interrupter number for GSI endpoints */
	params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);

	/* Enable XferInProgress and XferComplete Interrupts */
	params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
	params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
	params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	/* Remove FIFO Number for GSI EP*/
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	params.param0 |= DWC3_DEPCFG_ACTION_INIT;

	dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
	params.param0, params.param1, params.param2, dep->name);

	/* Issue Set Endpoint Configuration; return status is not checked */
	dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);

	/* Set XferRsc Index for GSI EP */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		/* One transfer resource, then mirror dwc3_gadget ep-enable:
		 * record descriptors and open the EP in DALEPENA. */
		memset(&params, 0x00, sizeof(params));
		params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
		dwc3_send_gadget_ep_cmd(dep,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
	}

}
1160
1161/*
1162* Enables USB wrapper for GSI
1163*
1164* @usb_ep - pointer to usb_ep instance.
1165*/
static void gsi_enable(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* Ungate the GSI clock first */
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
	/* Pulse the doorbell-pointer restart bit (set then clear) */
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
	dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
	/* Finally turn the GSI wrapper on */
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
}
1182
/*
* Block or allow doorbell towards GSI
*
* @usb_ep - pointer to usb_ep instance.
* @block_db - true to block the doorbell towards GSI, false to allow it.
*/
1190static void gsi_set_clear_dbell(struct usb_ep *ep,
1191 bool block_db)
1192{
1193
1194 struct dwc3_ep *dep = to_dwc3_ep(ep);
1195 struct dwc3 *dwc = dep->dwc;
1196 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1197
1198 dwc3_msm_write_reg_field(mdwc->base,
1199 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1200}
1201
1202/*
1203* Performs necessary checks before stopping GSI channels
1204*
1205* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1206*/
static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
{
	u32 timeout = 1500;
	u32 reg = 0;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* Wait for the GSI write-control state machine to go idle.
	 * NOTE(review): this is a raw busy-wait with no delay between
	 * register reads — confirm a udelay/cpu_relax is not needed here. */
	while (dwc3_msm_read_reg_field(mdwc->base,
		GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
		if (!timeout--) {
			dev_err(mdwc->dev,
			"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
			return false;
		}
	}
	/* Check for U3 only if we are not handling Function Suspend */
	if (!f_suspend) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
			dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
			return false;
		}
	}

	return true;
}
1234
1235
1236/**
1237* Performs GSI operations or GSI EP related operations.
1238*
1239* @usb_ep - pointer to usb_ep instance.
1240* @op_data - pointer to opcode related data.
1241* @op - GSI related or GSI EP related op code.
1242*
1243* @return int - 0 on success, negative on error.
1244* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1245*/
1246static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1247 void *op_data, enum gsi_ep_op op)
1248{
1249 u32 ret = 0;
1250 struct dwc3_ep *dep = to_dwc3_ep(ep);
1251 struct dwc3 *dwc = dep->dwc;
1252 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1253 struct usb_gsi_request *request;
1254 struct gsi_channel_info *ch_info;
1255 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001256 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001257
1258 switch (op) {
1259 case GSI_EP_OP_PREPARE_TRBS:
1260 request = (struct usb_gsi_request *)op_data;
1261 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1262 ret = gsi_prepare_trbs(ep, request);
1263 break;
1264 case GSI_EP_OP_FREE_TRBS:
1265 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1266 gsi_free_trbs(ep);
1267 break;
1268 case GSI_EP_OP_CONFIG:
1269 request = (struct usb_gsi_request *)op_data;
1270 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001271 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001272 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001273 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001274 break;
1275 case GSI_EP_OP_STARTXFER:
1276 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001277 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001278 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001279 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001280 break;
1281 case GSI_EP_OP_GET_XFER_IDX:
1282 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1283 ret = gsi_get_xfer_index(ep);
1284 break;
1285 case GSI_EP_OP_STORE_DBL_INFO:
1286 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1287 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1288 break;
1289 case GSI_EP_OP_ENABLE_GSI:
1290 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1291 gsi_enable(ep);
1292 break;
1293 case GSI_EP_OP_GET_CH_INFO:
1294 ch_info = (struct gsi_channel_info *)op_data;
1295 gsi_get_channel_info(ep, ch_info);
1296 break;
1297 case GSI_EP_OP_RING_IN_DB:
1298 request = (struct usb_gsi_request *)op_data;
1299 dev_dbg(mdwc->dev, "RING IN EP DB\n");
1300 gsi_ring_in_db(ep, request);
1301 break;
1302 case GSI_EP_OP_UPDATEXFER:
1303 request = (struct usb_gsi_request *)op_data;
1304 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001305 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001306 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001307 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001308 break;
1309 case GSI_EP_OP_ENDXFER:
1310 request = (struct usb_gsi_request *)op_data;
1311 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001312 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001313 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001314 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001315 break;
1316 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1317 block_db = *((bool *)op_data);
1318 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1319 block_db);
1320 gsi_set_clear_dbell(ep, block_db);
1321 break;
1322 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1323 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1324 f_suspend = *((bool *)op_data);
1325 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1326 break;
1327 case GSI_EP_OP_DISABLE:
1328 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1329 ret = ep->ops->disable(ep);
1330 break;
1331 default:
1332 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1333 }
1334
1335 return ret;
1336}
1337
/**
 * Configure MSM endpoint.
 * This function does specific configurations
 * for an endpoint which needs a specific implementation
 * in the MSM architecture.
 *
 * This function should be called by a usb function/class
 * layer which needs support from the specific MSM HW
 * which wraps the USB3 core. (like GSI or DBM specific endpoints)
 *
 * @ep - a pointer to some usb_ep instance
 *
 * @return int - 0 on success, negative on error.
 */
1352int msm_ep_config(struct usb_ep *ep)
1353{
1354 struct dwc3_ep *dep = to_dwc3_ep(ep);
1355 struct dwc3 *dwc = dep->dwc;
1356 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1357 struct usb_ep_ops *new_ep_ops;
1358
1359
1360 /* Save original ep ops for future restore*/
1361 if (mdwc->original_ep_ops[dep->number]) {
1362 dev_err(mdwc->dev,
1363 "ep [%s,%d] already configured as msm endpoint\n",
1364 ep->name, dep->number);
1365 return -EPERM;
1366 }
1367 mdwc->original_ep_ops[dep->number] = ep->ops;
1368
1369 /* Set new usb ops as we like */
1370 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1371 if (!new_ep_ops)
1372 return -ENOMEM;
1373
1374 (*new_ep_ops) = (*ep->ops);
1375 new_ep_ops->queue = dwc3_msm_ep_queue;
1376 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1377 ep->ops = new_ep_ops;
1378
1379 /*
1380 * Do HERE more usb endpoint configurations
1381 * which are specific to MSM.
1382 */
1383
1384 return 0;
1385}
1386EXPORT_SYMBOL(msm_ep_config);
1387
1388/**
1389 * Un-configure MSM endpoint.
1390 * Tear down configurations done in the
1391 * dwc3_msm_ep_config function.
1392 *
1393 * @ep - a pointer to some usb_ep instance
1394 *
1395 * @return int - 0 on success, negative on error.
1396 */
1397int msm_ep_unconfig(struct usb_ep *ep)
1398{
1399 struct dwc3_ep *dep = to_dwc3_ep(ep);
1400 struct dwc3 *dwc = dep->dwc;
1401 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1402 struct usb_ep_ops *old_ep_ops;
1403
1404 /* Restore original ep ops */
1405 if (!mdwc->original_ep_ops[dep->number]) {
1406 dev_err(mdwc->dev,
1407 "ep [%s,%d] was not configured as msm endpoint\n",
1408 ep->name, dep->number);
1409 return -EINVAL;
1410 }
1411 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1412 ep->ops = mdwc->original_ep_ops[dep->number];
1413 mdwc->original_ep_ops[dep->number] = NULL;
1414 kfree(old_ep_ops);
1415
1416 /*
1417 * Do HERE more usb endpoint un-configurations
1418 * which are specific to MSM.
1419 */
1420
1421 return 0;
1422}
1423EXPORT_SYMBOL(msm_ep_unconfig);
1424#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1425
1426static void dwc3_resume_work(struct work_struct *w);
1427
/* Worker: tears down and re-establishes the active USB session after a
 * controller error (scheduled from dwc3_msm_notify_event). */
static void dwc3_restart_usb_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
						restart_usb_work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	unsigned int timeout = 50;

	dev_dbg(mdwc->dev, "%s\n", __func__);

	/* Nothing to restart while suspended or when not dual-role */
	if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
		dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
		return;
	}

	/* guard against concurrent VBUS handling */
	mdwc->in_restart = true;

	if (!mdwc->vbus_active) {
		dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
		dwc->err_evt_seen = false;
		mdwc->in_restart = false;
		return;
	}

	/* Reset active USB connection */
	dwc3_resume_work(&mdwc->resume_work);

	/* Make sure disconnect is processed before sending connect */
	while (--timeout && !pm_runtime_suspended(mdwc->dev))
		msleep(20);

	if (!timeout) {
		dev_dbg(mdwc->dev,
			"Not in LPM after disconnect, forcing suspend...\n");
		pm_runtime_suspend(mdwc->dev);
	}

	mdwc->in_restart = false;
	/* Force reconnect only if cable is still connected */
	if (mdwc->vbus_active)
		dwc3_resume_work(&mdwc->resume_work);

	dwc->err_evt_seen = false;
	/* Let the OTG state machine settle before returning */
	flush_delayed_work(&mdwc->sm_work);
}
1473
1474/*
1475 * Check whether the DWC3 requires resetting the ep
1476 * after going to Low Power Mode (lpm)
1477 */
1478bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1479{
1480 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1481 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1482
1483 return dbm_reset_ep_after_lpm(mdwc->dbm);
1484}
1485EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1486
1487/*
1488 * Config Global Distributed Switch Controller (GDSC)
1489 * to support controller power collapse
1490 */
1491static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1492{
1493 int ret;
1494
1495 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1496 return -EPERM;
1497
1498 if (on) {
1499 ret = regulator_enable(mdwc->dwc3_gdsc);
1500 if (ret) {
1501 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1502 return ret;
1503 }
1504 } else {
1505 ret = regulator_disable(mdwc->dwc3_gdsc);
1506 if (ret) {
1507 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1508 return ret;
1509 }
1510 }
1511
1512 return ret;
1513}
1514
/* Assert/deassert the dwc3 core block reset.  Clocks are gated before
 * assert and re-enabled (in reverse order) after deassert; the power
 * event IRQ is masked for the duration of the reset. */
static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
{
	int ret = 0;

	if (assert) {
		disable_irq(mdwc->pwr_event_irq);
		/* Using asynchronous block reset to the hardware */
		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
		clk_disable_unprepare(mdwc->utmi_clk);
		clk_disable_unprepare(mdwc->sleep_clk);
		clk_disable_unprepare(mdwc->core_clk);
		clk_disable_unprepare(mdwc->iface_clk);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
	} else {
		dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
		/* brief settle time before re-enabling clocks */
		ndelay(200);
		clk_prepare_enable(mdwc->iface_clk);
		clk_prepare_enable(mdwc->core_clk);
		clk_prepare_enable(mdwc->sleep_clk);
		clk_prepare_enable(mdwc->utmi_clk);
		enable_irq(mdwc->pwr_event_irq);
	}

	return ret;
}
1545
/* Program GUCTL.REFCLKPER and (on >= 2.50a cores) GFLADJ according to
 * the UTMI clock rate; constants come from the SNPS databook table
 * referenced below. */
static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
{
	u32 guctl, gfladj = 0;

	guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
	guctl &= ~DWC3_GUCTL_REFCLKPER;

	/* GFLADJ register is used starting with revision 2.50a */
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
		gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
		gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
	}

	/* Refer to SNPS Databook Table 6-55 for calculations used */
	switch (mdwc->utmi_clk_rate) {
	case 19200000:
		guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	case 24000000:
		guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	default:
		/* leave hardware defaults untouched for unknown rates */
		dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
				mdwc->utmi_clk_rate);
		break;
	}

	dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
	if (gfladj)
		dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
}
1587
1588/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
{
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
		/* On older cores set XHCI_REV bit to specify revision 1.0 */
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
					 BIT(2), 1);

	/*
	 * Enable master clock for RAMs to allow BAM to access RAMs when
	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
	 * are seen where RAM clocks get turned OFF in SS mode
	 */
	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);

}
1605
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001606static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1607{
1608 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1609 vbus_draw_work);
1610 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1611
1612 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1613}
1614
Mayank Rana511f3b22016-08-02 12:00:11 -07001615static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1616{
1617 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001618 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001619 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001620 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001621
1622 switch (event) {
1623 case DWC3_CONTROLLER_ERROR_EVENT:
1624 dev_info(mdwc->dev,
1625 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1626 dwc->irq_cnt);
1627
1628 dwc3_gadget_disable_irq(dwc);
1629
1630 /* prevent core from generating interrupts until recovery */
1631 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1632 reg |= DWC3_GCTL_CORESOFTRESET;
1633 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1634
1635 /* restart USB which performs full reset and reconnect */
1636 schedule_work(&mdwc->restart_usb_work);
1637 break;
1638 case DWC3_CONTROLLER_RESET_EVENT:
1639 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1640 /* HS & SSPHYs get reset as part of core soft reset */
1641 dwc3_msm_qscratch_reg_init(mdwc);
1642 break;
1643 case DWC3_CONTROLLER_POST_RESET_EVENT:
1644 dev_dbg(mdwc->dev,
1645 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1646
1647 /*
1648 * Below sequence is used when controller is working without
1649 * having ssphy and only USB high speed is supported.
1650 */
1651 if (dwc->maximum_speed == USB_SPEED_HIGH) {
1652 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1653 dwc3_msm_read_reg(mdwc->base,
1654 QSCRATCH_GENERAL_CFG)
1655 | PIPE_UTMI_CLK_DIS);
1656
1657 usleep_range(2, 5);
1658
1659
1660 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1661 dwc3_msm_read_reg(mdwc->base,
1662 QSCRATCH_GENERAL_CFG)
1663 | PIPE_UTMI_CLK_SEL
1664 | PIPE3_PHYSTATUS_SW);
1665
1666 usleep_range(2, 5);
1667
1668 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1669 dwc3_msm_read_reg(mdwc->base,
1670 QSCRATCH_GENERAL_CFG)
1671 & ~PIPE_UTMI_CLK_DIS);
1672 }
1673
1674 dwc3_msm_update_ref_clk(mdwc);
1675 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1676 break;
1677 case DWC3_CONTROLLER_CONNDONE_EVENT:
1678 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1679 /*
1680 * Add power event if the dbm indicates coming out of L1 by
1681 * interrupt
1682 */
1683 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1684 dwc3_msm_write_reg_field(mdwc->base,
1685 PWR_EVNT_IRQ_MASK_REG,
1686 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1687
1688 atomic_set(&dwc->in_lpm, 0);
1689 break;
1690 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1691 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1692 if (dwc->enable_bus_suspend) {
1693 mdwc->suspend = dwc->b_suspend;
1694 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1695 }
1696 break;
1697 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1698 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001699 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001700 break;
1701 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1702 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001703 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001704 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001705 case DWC3_GSI_EVT_BUF_ALLOC:
1706 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1707
1708 if (!mdwc->num_gsi_event_buffers)
1709 break;
1710
1711 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1712 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1713 GFP_KERNEL);
1714 if (!mdwc->gsi_ev_buff) {
1715 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1716 break;
1717 }
1718
1719 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1720
1721 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1722 if (!evt)
1723 break;
1724 evt->dwc = dwc;
1725 evt->length = DWC3_EVENT_BUFFERS_SIZE;
1726 evt->buf = dma_alloc_coherent(dwc->dev,
1727 DWC3_EVENT_BUFFERS_SIZE,
1728 &evt->dma, GFP_KERNEL);
1729 if (!evt->buf) {
1730 dev_err(dwc->dev,
1731 "can't allocate gsi_evt_buf(%d)\n", i);
1732 break;
1733 }
1734 mdwc->gsi_ev_buff[i] = evt;
1735 }
1736 break;
1737 case DWC3_GSI_EVT_BUF_SETUP:
1738 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1739 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1740 evt = mdwc->gsi_ev_buff[i];
1741 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1742 evt->buf, (unsigned long long) evt->dma,
1743 evt->length);
1744 memset(evt->buf, 0, evt->length);
1745 evt->lpos = 0;
1746 /*
1747 * Primary event buffer is programmed with registers
1748 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1749 * program USB GSI related event buffer with DWC3
1750 * controller.
1751 */
1752 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1753 lower_32_bits(evt->dma));
1754 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1755 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1756 DWC3_GEVENT_TYPE_GSI) |
1757 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1758 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1759 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1760 ((evt->length) & 0xffff));
1761 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1762 }
1763 break;
1764 case DWC3_GSI_EVT_BUF_CLEANUP:
1765 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
1766 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1767 evt = mdwc->gsi_ev_buff[i];
1768 evt->lpos = 0;
1769 /*
1770 * Primary event buffer is programmed with registers
1771 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1772 * program USB GSI related event buffer with DWC3
1773 * controller.
1774 */
1775 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1776 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1777 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1778 DWC3_GEVNTSIZ_INTMASK |
1779 DWC3_GEVNTSIZ_SIZE((i+1)));
1780 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1781 }
1782 break;
1783 case DWC3_GSI_EVT_BUF_FREE:
1784 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
1785 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1786 evt = mdwc->gsi_ev_buff[i];
1787 if (evt)
1788 dma_free_coherent(dwc->dev, evt->length,
1789 evt->buf, evt->dma);
1790 }
1791 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001792 default:
1793 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1794 break;
1795 }
1796}
1797
1798static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1799{
1800 int ret = 0;
1801
1802 if (core_reset) {
1803 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1804 if (ret)
1805 return;
1806
1807 usleep_range(1000, 1200);
1808 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1809 if (ret)
1810 return;
1811
1812 usleep_range(10000, 12000);
1813 }
1814
1815 if (mdwc->dbm) {
1816 /* Reset the DBM */
1817 dbm_soft_reset(mdwc->dbm, 1);
1818 usleep_range(1000, 1200);
1819 dbm_soft_reset(mdwc->dbm, 0);
1820
1821 /*enable DBM*/
1822 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1823 DBM_EN_MASK, 0x1);
1824 dbm_enable(mdwc->dbm);
1825 }
1826}
1827
/* Re-initialize the controller after a power collapse (power-on reset):
 * restore AHB2PHY wait states, then redo core init and event buffers. */
static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	u32 val;

	/* Configure AHB2PHY for one wait state read/write */
	if (mdwc->ahb2phy_base) {
		clk_prepare_enable(mdwc->cfg_ahb_clk);
		val = readl_relaxed(mdwc->ahb2phy_base +
				PERIPH_SS_AHB2PHY_TOP_CFG);
		if (val != ONE_READ_WRITE_WAIT) {
			writel_relaxed(ONE_READ_WRITE_WAIT,
				mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
			/* complete above write before configuring USB PHY. */
			mb();
		}
		clk_disable_unprepare(mdwc->cfg_ahb_clk);
	}

	/* one-time pre-init on the very first power-up only */
	if (!mdwc->init) {
		dwc3_core_pre_init(dwc);
		mdwc->init = true;
	}

	dwc3_core_init(dwc);
	/* Re-configure event buffers */
	dwc3_event_buffers_setup(dwc);
}
1856
/* Prepare the controller/PHY for low power mode: verify P3 (when a
 * SuperSpeed session is active), then put the HS PHY into L2 and wait
 * for the power-event status to confirm it.  Returns 0 or -EBUSY. */
static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
{
	unsigned long timeout;
	u32 reg = 0;

	if ((mdwc->in_host_mode || mdwc->vbus_active)
			&& dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
		if (!atomic_read(&mdwc->in_p3)) {
			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
			return -EBUSY;
		}
	}

	/* Clear previous L2 events */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);

	/* Prepare HSPHY for suspend */
	reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);

	/* Wait for PHY to go into L2 */
	timeout = jiffies + msecs_to_jiffies(5);
	while (!time_after(jiffies, timeout)) {
		reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
		if (reg & PWR_EVNT_LPM_IN_L2_MASK)
			break;
	}
	/* proceed anyway on timeout; only log the failure */
	if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
		dev_err(mdwc->dev, "could not transition HS PHY to L2\n");

	/* Clear L2 event bit */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK);

	return 0;
}
1895
1896static void dwc3_msm_bus_vote_w(struct work_struct *w)
1897{
1898 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1899 int ret;
1900
1901 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1902 mdwc->bus_vote);
1903 if (ret)
1904 dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
1905}
1906
1907static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
1908{
1909 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1910 int i, num_ports;
1911 u32 reg;
1912
1913 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
1914 if (mdwc->in_host_mode) {
1915 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
1916 num_ports = HCS_MAX_PORTS(reg);
1917 for (i = 0; i < num_ports; i++) {
1918 reg = dwc3_msm_read_reg(mdwc->base,
1919 USB3_PORTSC + i*0x10);
1920 if (reg & PORT_PE) {
1921 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
1922 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1923 else if (DEV_LOWSPEED(reg))
1924 mdwc->hs_phy->flags |= PHY_LS_MODE;
1925 }
1926 }
1927 } else {
1928 if (dwc->gadget.speed == USB_SPEED_HIGH ||
1929 dwc->gadget.speed == USB_SPEED_FULL)
1930 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1931 else if (dwc->gadget.speed == USB_SPEED_LOW)
1932 mdwc->hs_phy->flags |= PHY_LS_MODE;
1933 }
1934}
1935
1936
/*
 * dwc3_msm_suspend() - put the controller and PHYs into low power mode.
 *
 * The sequence is strictly ordered: abort checks first, then PHY suspend,
 * then clock gating (core before iface, per the hardware FSM dependency),
 * then optional controller power collapse, and finally wakeup-IRQ arming.
 *
 * Returns 0 on success, -EBUSY when LPM entry must be deferred (pending
 * device events, OTG state machine still transitioning, or gadget not in
 * the CONFIGURED state for bus suspend), or the error from
 * dwc3_msm_prepare_suspend().
 */
static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
{
	int ret;
	bool can_suspend_ssphy;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct dwc3_event_buffer *evt;

	if (atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
		return 0;
	}

	/* In device mode, refuse to suspend with unconsumed events queued */
	if (!mdwc->in_host_mode) {
		evt = dwc->ev_buf;
		if ((evt->flags & DWC3_EVENT_PENDING)) {
			dev_dbg(mdwc->dev,
				"%s: %d device events pending, abort suspend\n",
				__func__, evt->count / 4);
			return -EBUSY;
		}
	}

	if (!mdwc->vbus_active && dwc->is_drd &&
		mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
		/*
		 * In some cases, the pm_runtime_suspend may be called by
		 * usb_bam when there is pending lpm flag. However, if this is
		 * done when cable was disconnected and otg state has not
		 * yet changed to IDLE, then it means OTG state machine
		 * is running and we race against it. So cancel LPM for now,
		 * and OTG state machine will go for LPM later, after completing
		 * transition to IDLE state.
		 */
		dev_dbg(mdwc->dev,
			"%s: cable disconnected while not in idle otg state\n",
			__func__);
		return -EBUSY;
	}

	/*
	 * Check if device is not in CONFIGURED state
	 * then check controller state of L2 and break
	 * LPM sequence. Check this for device bus suspend case.
	 */
	if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
		(dwc->gadget.state != USB_STATE_CONFIGURED)) {
		pr_err("%s(): Trying to go in LPM with state:%d\n",
					__func__, dwc->gadget.state);
		pr_err("%s(): LPM is not performed.\n", __func__);
		return -EBUSY;
	}

	ret = dwc3_msm_prepare_suspend(mdwc);
	if (ret)
		return ret;

	/* SS PHY must stay up while a SuperSpeed device is attached in host mode */
	can_suspend_ssphy = !(mdwc->in_host_mode &&
				dwc3_msm_is_host_superspeed(mdwc));

	/* Disable core irq */
	if (dwc->irq)
		disable_irq(dwc->irq);

	/* disable power event irq, hs and ss phy irq is used as wake up src */
	disable_irq(mdwc->pwr_event_irq);

	dwc3_set_phy_speed_flags(mdwc);
	/* Suspend HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 1);

	/* Suspend SS PHY */
	if (dwc->maximum_speed == USB_SPEED_SUPER && can_suspend_ssphy) {
		/* indicate phy about SS mode */
		if (dwc3_msm_is_superspeed(mdwc))
			mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
		usb_phy_set_suspend(mdwc->ss_phy, 1);
		mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
	}

	/* make sure above writes are completed before turning off clocks */
	wmb();

	/* Disable clocks */
	if (mdwc->bus_aggr_clk)
		clk_disable_unprepare(mdwc->bus_aggr_clk);
	clk_disable_unprepare(mdwc->utmi_clk);

	/* Memory core: OFF, Memory periphery: OFF */
	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
	}

	/* Drop core clock to the 19.2 MHz XO rate while idle */
	clk_set_rate(mdwc->core_clk, 19200000);
	clk_disable_unprepare(mdwc->core_clk);
	if (mdwc->noc_aggr_clk)
		clk_disable_unprepare(mdwc->noc_aggr_clk);
	/*
	 * Disable iface_clk only after core_clk as core_clk has FSM
	 * depedency on iface_clk. Hence iface_clk should be turned off
	 * after core_clk is turned off.
	 */
	clk_disable_unprepare(mdwc->iface_clk);
	/* USB PHY no more requires TCXO */
	clk_disable_unprepare(mdwc->xo_clk);

	/* Perform controller power collapse */
	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 0);
		clk_disable_unprepare(mdwc->sleep_clk);
	}

	/* Remove bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 0;
		schedule_work(&mdwc->bus_vote_w);
	}

	/*
	 * release wakeup source with timeout to defer system suspend to
	 * handle case where on USB cable disconnect, SUSPEND and DISCONNECT
	 * event is received.
	 */
	if (mdwc->lpm_to_suspend_delay) {
		dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
					mdwc->lpm_to_suspend_delay);
		pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
	} else {
		pm_relax(mdwc->dev);
	}

	atomic_set(&dwc->in_lpm, 1);

	/*
	 * with DCP or during cable disconnect, we dont require wakeup
	 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
	 * case of host bus suspend and device bus suspend.
	 */
	if (mdwc->vbus_active || mdwc->in_host_mode) {
		enable_irq_wake(mdwc->hs_phy_irq);
		enable_irq(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			enable_irq_wake(mdwc->ss_phy_irq);
			enable_irq(mdwc->ss_phy_irq);
		}
		/*
		 * Enable power event irq during bus suspend in host mode for
		 * mapping MPM pin for DP so that wakeup can happen in system
		 * suspend.
		 */
		if (mdwc->in_host_mode) {
			enable_irq(mdwc->pwr_event_irq);
			enable_irq_wake(mdwc->pwr_event_irq);
		}
		mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 in low power mode\n");
	return 0;
}
2100
/*
 * dwc3_msm_resume() - bring the controller out of low power mode.
 *
 * Mirrors dwc3_msm_suspend() in reverse, honoring the same ordering
 * constraints: bus vote and TCXO first, GDSC + core reset if the
 * controller was power collapsed, clocks (iface before core, per the
 * FSM dependency), then PHYs, then post-power-collapse re-init, and
 * finally IRQ re-arming. Always returns 0.
 */
static int dwc3_msm_resume(struct dwc3_msm *mdwc)
{
	int ret;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);

	if (!atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
		return 0;
	}

	pm_stay_awake(mdwc->dev);

	/* Enable bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 1;
		schedule_work(&mdwc->bus_vote_w);
	}

	/* Vote for TCXO while waking up USB HSPHY */
	ret = clk_prepare_enable(mdwc->xo_clk);
	if (ret)
		dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
						__func__, ret);

	/* Restore controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 1);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset assert failed\n",
					__func__);
		/* HW requires a short delay for reset to take place properly */
		usleep_range(1000, 1200);
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
					__func__);
		clk_prepare_enable(mdwc->sleep_clk);
	}

	/*
	 * Enable clocks
	 * Turned ON iface_clk before core_clk due to FSM depedency.
	 */
	clk_prepare_enable(mdwc->iface_clk);
	if (mdwc->noc_aggr_clk)
		clk_prepare_enable(mdwc->noc_aggr_clk);
	/* restore the operational core clock rate saved at probe time */
	clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
	clk_prepare_enable(mdwc->core_clk);

	/* set Memory core: ON, Memory periphery: ON */
	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);

	clk_prepare_enable(mdwc->utmi_clk);
	if (mdwc->bus_aggr_clk)
		clk_prepare_enable(mdwc->bus_aggr_clk);

	/* Resume SS PHY */
	if (dwc->maximum_speed == USB_SPEED_SUPER &&
			mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
		/* tell the PHY which Type-C lane is active before waking it */
		mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
		if (mdwc->typec_orientation == ORIENTATION_CC1)
			mdwc->ss_phy->flags |= PHY_LANE_A;
		if (mdwc->typec_orientation == ORIENTATION_CC2)
			mdwc->ss_phy->flags |= PHY_LANE_B;
		usb_phy_set_suspend(mdwc->ss_phy, 0);
		mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
		mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
	}

	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
	/* Resume HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 0);

	/* Recover from controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		u32 tmp;

		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);

		dwc3_msm_power_collapse_por(mdwc);

		/* Get initial P3 status and enable IN_P3 event */
		tmp = dwc3_msm_read_reg_field(mdwc->base,
			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);

		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
	}

	atomic_set(&dwc->in_lpm, 0);

	/* Disable HSPHY auto suspend */
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
				~(DWC3_GUSB2PHYCFG_ENBLSLPM |
					DWC3_GUSB2PHYCFG_SUSPHY));

	/* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
	if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
		disable_irq_wake(mdwc->hs_phy_irq);
		disable_irq_nosync(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			disable_irq_wake(mdwc->ss_phy_irq);
			disable_irq_nosync(mdwc->ss_phy_irq);
		}
		if (mdwc->in_host_mode) {
			disable_irq_wake(mdwc->pwr_event_irq);
			disable_irq(mdwc->pwr_event_irq);
		}
		mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 exited from low power mode\n");

	/* enable power evt irq for IN P3 detection */
	enable_irq(mdwc->pwr_event_irq);

	/* Enable core irq */
	if (dwc->irq)
		enable_irq(dwc->irq);

	/*
	 * Handle other power events that could not have been handled during
	 * Low Power Mode
	 */
	dwc3_pwr_event_handler(mdwc);

	return 0;
}
2237
2238/**
2239 * dwc3_ext_event_notify - callback to handle events from external transceiver
2240 *
2241 * Returns 0 on success
2242 */
2243static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2244{
2245 /* Flush processing any pending events before handling new ones */
2246 flush_delayed_work(&mdwc->sm_work);
2247
2248 if (mdwc->id_state == DWC3_ID_FLOAT) {
2249 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2250 set_bit(ID, &mdwc->inputs);
2251 } else {
2252 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2253 clear_bit(ID, &mdwc->inputs);
2254 }
2255
2256 if (mdwc->vbus_active && !mdwc->in_restart) {
2257 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2258 set_bit(B_SESS_VLD, &mdwc->inputs);
2259 } else {
2260 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2261 clear_bit(B_SESS_VLD, &mdwc->inputs);
2262 }
2263
2264 if (mdwc->suspend) {
2265 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2266 set_bit(B_SUSPEND, &mdwc->inputs);
2267 } else {
2268 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2269 clear_bit(B_SUSPEND, &mdwc->inputs);
2270 }
2271
2272 schedule_delayed_work(&mdwc->sm_work, 0);
2273}
2274
2275static void dwc3_resume_work(struct work_struct *w)
2276{
2277 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002278
2279 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2280
2281 /*
2282 * exit LPM first to meet resume timeline from device side.
2283 * resume_pending flag would prevent calling
2284 * dwc3_msm_resume() in case we are here due to system
2285 * wide resume without usb cable connected. This flag is set
2286 * only in case of power event irq in lpm.
2287 */
2288 if (mdwc->resume_pending) {
2289 dwc3_msm_resume(mdwc);
2290 mdwc->resume_pending = false;
2291 }
2292
Mayank Rana83ad5822016-08-09 14:17:22 -07002293 if (atomic_read(&mdwc->pm_suspended))
Mayank Rana511f3b22016-08-02 12:00:11 -07002294 /* let pm resume kick in resume work later */
2295 return;
Mayank Rana511f3b22016-08-02 12:00:11 -07002296 dwc3_ext_event_notify(mdwc);
2297}
2298
2299static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2300{
2301 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2302 u32 irq_stat, irq_clear = 0;
2303
2304 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2305 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2306
2307 /* Check for P3 events */
2308 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2309 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2310 /* Can't tell if entered or exit P3, so check LINKSTATE */
2311 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2312 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2313 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2314 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2315
2316 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2317 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2318 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2319 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2320 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2321 atomic_set(&mdwc->in_p3, 0);
2322 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2323 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2324 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2325 atomic_set(&mdwc->in_p3, 1);
2326 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2327 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2328 }
2329
2330 /* Clear L2 exit */
2331 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2332 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2333 irq_stat |= PWR_EVNT_LPM_OUT_L2_MASK;
2334 }
2335
2336 /* Handle exit from L1 events */
2337 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2338 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2339 __func__);
2340 if (usb_gadget_wakeup(&dwc->gadget))
2341 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2342 __func__);
2343 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2344 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2345 }
2346
2347 /* Unhandled events */
2348 if (irq_stat)
2349 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2350 __func__, irq_stat);
2351
2352 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2353}
2354
2355static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2356{
2357 struct dwc3_msm *mdwc = _mdwc;
2358 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2359
2360 dev_dbg(mdwc->dev, "%s\n", __func__);
2361
2362 if (atomic_read(&dwc->in_lpm))
2363 dwc3_resume_work(&mdwc->resume_work);
2364 else
2365 dwc3_pwr_event_handler(mdwc);
2366
Mayank Rana511f3b22016-08-02 12:00:11 -07002367 return IRQ_HANDLED;
2368}
2369
2370static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2371{
2372 struct dwc3_msm *mdwc = data;
2373 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2374
2375 dwc->t_pwr_evt_irq = ktime_get();
2376 dev_dbg(mdwc->dev, "%s received\n", __func__);
2377 /*
2378 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to acertain
2379 * which interrupts have been triggered, as the clocks are disabled.
2380 * Resume controller by waking up pwr event irq thread.After re-enabling
2381 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2382 * all other power events.
2383 */
2384 if (atomic_read(&dwc->in_lpm)) {
2385 /* set this to call dwc3_msm_resume() */
2386 mdwc->resume_pending = true;
2387 return IRQ_WAKE_THREAD;
2388 }
2389
2390 dwc3_pwr_event_handler(mdwc);
2391 return IRQ_HANDLED;
2392}
2393
2394static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2395 unsigned long action, void *hcpu)
2396{
2397 uint32_t cpu = (uintptr_t)hcpu;
2398 struct dwc3_msm *mdwc =
2399 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2400
2401 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2402 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2403 cpu_to_affin, mdwc->irq_to_affin);
2404 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2405 }
2406
2407 return NOTIFY_OK;
2408}
2409
2410static void dwc3_otg_sm_work(struct work_struct *w);
2411
/*
 * dwc3_msm_get_clk_gdsc() - acquire all clocks, the GDSC regulator and the
 * core reset control used by the driver, and set their initial rates.
 *
 * The GDSC, bus_aggr_clk and noc_aggr_clk are optional (failures are
 * tolerated and the handle is NULLed); cfg_ahb_clk is only looked up when
 * listed in clock-names. All handles are devm-managed, so no explicit
 * release is needed on the error paths.
 *
 * Returns 0 on success or a negative errno from the first mandatory
 * resource that could not be obtained (including -EPROBE_DEFER).
 */
static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
{
	int ret;

	/* GDSC is optional: keep going with a NULL handle if absent */
	mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
	if (IS_ERR(mdwc->dwc3_gdsc))
		mdwc->dwc3_gdsc = NULL;

	mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
	if (IS_ERR(mdwc->xo_clk)) {
		dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
				__func__);
		ret = PTR_ERR(mdwc->xo_clk);
		return ret;
	}
	clk_set_rate(mdwc->xo_clk, 19200000);

	mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
	if (IS_ERR(mdwc->iface_clk)) {
		dev_err(mdwc->dev, "failed to get iface_clk\n");
		ret = PTR_ERR(mdwc->iface_clk);
		return ret;
	}

	/*
	 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
	 * run at 125Mhz in SSUSB mode and >60MHZ for HSUSB mode.
	 * On newer platform it can run at 150MHz as well.
	 */
	mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
	if (IS_ERR(mdwc->core_clk)) {
		dev_err(mdwc->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdwc->core_clk);
		return ret;
	}

	mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
	if (IS_ERR(mdwc->core_reset)) {
		dev_err(mdwc->dev, "failed to get core_reset\n");
		return PTR_ERR(mdwc->core_reset);
	}

	/* DT may pin the core clock rate; otherwise use the max supported */
	if (!of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
				(u32 *)&mdwc->core_clk_rate)) {
		mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
							mdwc->core_clk_rate);
	} else {
		/*
		 * Get Max supported clk frequency for USB Core CLK and request
		 * to set the same.
		 */
		mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
	}

	if (IS_ERR_VALUE(mdwc->core_clk_rate)) {
		dev_err(mdwc->dev, "fail to get core clk max freq.\n");
	} else {
		dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
							mdwc->core_clk_rate);
		ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
		if (ret)
			dev_err(mdwc->dev, "fail to set core_clk freq:%d\n",
									ret);
	}

	mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
	if (IS_ERR(mdwc->sleep_clk)) {
		dev_err(mdwc->dev, "failed to get sleep_clk\n");
		ret = PTR_ERR(mdwc->sleep_clk);
		return ret;
	}

	clk_set_rate(mdwc->sleep_clk, 32000);
	mdwc->utmi_clk_rate = 19200000;
	mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
	if (IS_ERR(mdwc->utmi_clk)) {
		dev_err(mdwc->dev, "failed to get utmi_clk\n");
		ret = PTR_ERR(mdwc->utmi_clk);
		return ret;
	}

	clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
	/* optional aggregation clocks: NULL handle when not provided */
	mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
	if (IS_ERR(mdwc->bus_aggr_clk))
		mdwc->bus_aggr_clk = NULL;

	mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
	if (IS_ERR(mdwc->noc_aggr_clk))
		mdwc->noc_aggr_clk = NULL;

	if (of_property_match_string(mdwc->dev->of_node,
				"clock-names", "cfg_ahb_clk") >= 0) {
		mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
		if (IS_ERR(mdwc->cfg_ahb_clk)) {
			ret = PTR_ERR(mdwc->cfg_ahb_clk);
			mdwc->cfg_ahb_clk = NULL;
			if (ret != -EPROBE_DEFER)
				dev_err(mdwc->dev,
					"failed to get cfg_ahb_clk ret %d\n",
					ret);
			return ret;
		}
	}

	return 0;
}
2518
/*
 * dwc3_msm_id_notifier() - extcon notifier for EXTCON_USB_HOST (ID pin).
 *
 * event != 0 means ID grounded (host role). Besides the ID state, the
 * Type-C plug orientation (EXTCON_USB_CC) and cable speed capability
 * (EXTCON_USB_SPEED) are refreshed on every callback; resume_work is only
 * queued when the ID state actually changed.
 */
static int dwc3_msm_id_notifier(struct notifier_block *nb,
	unsigned long event, void *ptr)
{
	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct extcon_dev *edev = ptr;
	enum dwc3_id_state id;
	int cc_state;
	int speed;

	if (!edev) {
		dev_err(mdwc->dev, "%s: edev null\n", __func__);
		goto done;
	}

	id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;

	dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);

	/* negative return: orientation unknown/not supported by this extcon */
	cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
	if (cc_state < 0)
		mdwc->typec_orientation = ORIENTATION_NONE;
	else
		mdwc->typec_orientation =
			cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;

	dev_dbg(mdwc->dev, "cc_state:%d", mdwc->typec_orientation);

	/* EXTCON_USB_SPEED state 0 caps the link at high speed */
	speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
	dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;

	if (mdwc->id_state != id) {
		mdwc->id_state = id;
		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
	}

done:
	return NOTIFY_DONE;
}
2558
/*
 * dwc3_msm_vbus_notifier() - extcon notifier for EXTCON_USB (VBUS).
 *
 * event is the new VBUS state. Duplicate notifications are ignored; on a
 * real change the Type-C orientation and cable speed capability are
 * refreshed and resume_work is queued (dual-role configs only, and not
 * while a controller restart is in progress).
 */
static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
	unsigned long event, void *ptr)
{
	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct extcon_dev *edev = ptr;
	int cc_state;
	int speed;

	if (!edev) {
		dev_err(mdwc->dev, "%s: edev null\n", __func__);
		goto done;
	}

	dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);

	/* no state change: nothing to do */
	if (mdwc->vbus_active == event)
		return NOTIFY_DONE;

	/* negative return: orientation unknown/not supported by this extcon */
	cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
	if (cc_state < 0)
		mdwc->typec_orientation = ORIENTATION_NONE;
	else
		mdwc->typec_orientation =
			cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;

	dev_dbg(mdwc->dev, "cc_state:%d", mdwc->typec_orientation);

	/* EXTCON_USB_SPEED state 0 caps the link at high speed */
	speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
	dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;

	mdwc->vbus_active = event;
	if (dwc->is_drd && !mdwc->in_restart)
		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
done:
	return NOTIFY_DONE;
}
2596
/*
 * dwc3_msm_extcon_register() - hook up extcon notifiers for VBUS and ID.
 *
 * Phandle 0 of the "extcon" DT property supplies the EXTCON_USB (VBUS)
 * notifier. If a second phandle exists it supplies a separate edev for
 * EXTCON_USB_HOST (ID); otherwise the first edev is intentionally reused
 * for the ID notifier as well. -ENODEV from a phandle lookup is tolerated
 * (that cable simply isn't wired up).
 *
 * Returns 0 when no "extcon" property exists or registration succeeded;
 * on a later failure the already-registered VBUS notifier is unwound.
 */
static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
{
	struct device_node *node = mdwc->dev->of_node;
	struct extcon_dev *edev;
	int ret = 0;

	if (!of_property_read_bool(node, "extcon"))
		return 0;

	edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
		return PTR_ERR(edev);

	if (!IS_ERR(edev)) {
		mdwc->extcon_vbus = edev;
		mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB,
				&mdwc->vbus_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB\n");
			return ret;
		}
	}

	/* if a second phandle was provided, use it to get a separate edev */
	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
		edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
			ret = PTR_ERR(edev);
			goto err;
		}
	}

	/* edev is either the second phandle's device or (reused) the first */
	if (!IS_ERR(edev)) {
		mdwc->extcon_id = edev;
		mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
				&mdwc->id_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
			goto err;
		}
	}

	return 0;
err:
	/* unwind the VBUS notifier registered above, if any */
	if (mdwc->extcon_vbus)
		extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
				&mdwc->vbus_nb);
	return ret;
}
2648
2649static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2650 char *buf)
2651{
2652 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2653
2654 if (mdwc->vbus_active)
2655 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2656 if (mdwc->id_state == DWC3_ID_GROUND)
2657 return snprintf(buf, PAGE_SIZE, "host\n");
2658
2659 return snprintf(buf, PAGE_SIZE, "none\n");
2660}
2661
2662static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2663 const char *buf, size_t count)
2664{
2665 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2666
2667 if (sysfs_streq(buf, "peripheral")) {
2668 mdwc->vbus_active = true;
2669 mdwc->id_state = DWC3_ID_FLOAT;
2670 } else if (sysfs_streq(buf, "host")) {
2671 mdwc->vbus_active = false;
2672 mdwc->id_state = DWC3_ID_GROUND;
2673 } else {
2674 mdwc->vbus_active = false;
2675 mdwc->id_state = DWC3_ID_FLOAT;
2676 }
2677
2678 dwc3_ext_event_notify(mdwc);
2679
2680 return count;
2681}
2682
2683static DEVICE_ATTR_RW(mode);
2684
2685static int dwc3_msm_probe(struct platform_device *pdev)
2686{
2687 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2688 struct device *dev = &pdev->dev;
2689 struct dwc3_msm *mdwc;
2690 struct dwc3 *dwc;
2691 struct resource *res;
2692 void __iomem *tcsr;
2693 bool host_mode;
2694 int ret = 0;
2695 int ext_hub_reset_gpio;
2696 u32 val;
2697
2698 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2699 if (!mdwc)
2700 return -ENOMEM;
2701
2702 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2703 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2704 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2705 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2706 return -EOPNOTSUPP;
2707 }
2708 }
2709
2710 platform_set_drvdata(pdev, mdwc);
2711 mdwc->dev = &pdev->dev;
2712
2713 INIT_LIST_HEAD(&mdwc->req_complete_list);
2714 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2715 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2716 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07002717 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002718 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
2719
2720 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2721 if (!mdwc->dwc3_wq) {
2722 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2723 return -ENOMEM;
2724 }
2725
2726 /* Get all clks and gdsc reference */
2727 ret = dwc3_msm_get_clk_gdsc(mdwc);
2728 if (ret) {
2729 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2730 return ret;
2731 }
2732
2733 mdwc->id_state = DWC3_ID_FLOAT;
2734 set_bit(ID, &mdwc->inputs);
2735
2736 mdwc->charging_disabled = of_property_read_bool(node,
2737 "qcom,charging-disabled");
2738
2739 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2740 &mdwc->lpm_to_suspend_delay);
2741 if (ret) {
2742 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2743 mdwc->lpm_to_suspend_delay = 0;
2744 }
2745
2746 /*
2747 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2748 * DP and DM linestate transitions during low power mode.
2749 */
2750 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2751 if (mdwc->hs_phy_irq < 0) {
2752 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2753 ret = -EINVAL;
2754 goto err;
2755 } else {
2756 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2757 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2758 msm_dwc3_pwr_irq,
2759 msm_dwc3_pwr_irq_thread,
2760 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2761 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2762 if (ret) {
2763 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2764 ret);
2765 goto err;
2766 }
2767 }
2768
2769 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
2770 if (mdwc->ss_phy_irq < 0) {
2771 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
2772 } else {
2773 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
2774 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
2775 msm_dwc3_pwr_irq,
2776 msm_dwc3_pwr_irq_thread,
2777 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2778 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
2779 if (ret) {
2780 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
2781 ret);
2782 goto err;
2783 }
2784 }
2785
2786 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
2787 if (mdwc->pwr_event_irq < 0) {
2788 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
2789 ret = -EINVAL;
2790 goto err;
2791 } else {
2792 /* will be enabled in dwc3_msm_resume() */
2793 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
2794 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
2795 msm_dwc3_pwr_irq,
2796 msm_dwc3_pwr_irq_thread,
2797 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
2798 "msm_dwc3", mdwc);
2799 if (ret) {
2800 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
2801 ret);
2802 goto err;
2803 }
2804 }
2805
2806 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
2807 if (!res) {
2808 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
2809 } else {
2810 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
2811 resource_size(res));
2812 if (IS_ERR_OR_NULL(tcsr)) {
2813 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
2814 } else {
2815 /* Enable USB3 on the primary USB port. */
2816 writel_relaxed(0x1, tcsr);
2817 /*
2818 * Ensure that TCSR write is completed before
2819 * USB registers initialization.
2820 */
2821 mb();
2822 }
2823 }
2824
2825 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
2826 if (!res) {
2827 dev_err(&pdev->dev, "missing memory base resource\n");
2828 ret = -ENODEV;
2829 goto err;
2830 }
2831
2832 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
2833 resource_size(res));
2834 if (!mdwc->base) {
2835 dev_err(&pdev->dev, "ioremap failed\n");
2836 ret = -ENODEV;
2837 goto err;
2838 }
2839
2840 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2841 "ahb2phy_base");
2842 if (res) {
2843 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
2844 res->start, resource_size(res));
2845 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
2846 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
2847 mdwc->ahb2phy_base = NULL;
2848 } else {
2849 /*
2850 * On some targets cfg_ahb_clk depends upon usb gdsc
2851 * regulator. If cfg_ahb_clk is enabled without
2852 * turning on usb gdsc regulator clk is stuck off.
2853 */
2854 dwc3_msm_config_gdsc(mdwc, 1);
2855 clk_prepare_enable(mdwc->cfg_ahb_clk);
2856 /* Configure AHB2PHY for one wait state read/write*/
2857 val = readl_relaxed(mdwc->ahb2phy_base +
2858 PERIPH_SS_AHB2PHY_TOP_CFG);
2859 if (val != ONE_READ_WRITE_WAIT) {
2860 writel_relaxed(ONE_READ_WRITE_WAIT,
2861 mdwc->ahb2phy_base +
2862 PERIPH_SS_AHB2PHY_TOP_CFG);
2863 /* complete above write before using USB PHY */
2864 mb();
2865 }
2866 clk_disable_unprepare(mdwc->cfg_ahb_clk);
2867 dwc3_msm_config_gdsc(mdwc, 0);
2868 }
2869 }
2870
2871 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
2872 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
2873 if (IS_ERR(mdwc->dbm)) {
2874 dev_err(&pdev->dev, "unable to get dbm device\n");
2875 ret = -EPROBE_DEFER;
2876 goto err;
2877 }
2878 /*
2879 * Add power event if the dbm indicates coming out of L1
2880 * by interrupt
2881 */
2882 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
2883 if (!mdwc->pwr_event_irq) {
2884 dev_err(&pdev->dev,
2885 "need pwr_event_irq exiting L1\n");
2886 ret = -EINVAL;
2887 goto err;
2888 }
2889 }
2890 }
2891
2892 ext_hub_reset_gpio = of_get_named_gpio(node,
2893 "qcom,ext-hub-reset-gpio", 0);
2894
2895 if (gpio_is_valid(ext_hub_reset_gpio)
2896 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
2897 "qcom,ext-hub-reset-gpio"))) {
2898 /* reset external hub */
2899 gpio_direction_output(ext_hub_reset_gpio, 1);
2900 /*
2901 * Hub reset should be asserted for minimum 5microsec
2902 * before deasserting.
2903 */
2904 usleep_range(5, 1000);
2905 gpio_direction_output(ext_hub_reset_gpio, 0);
2906 }
2907
2908 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
2909 &mdwc->tx_fifo_size))
2910 dev_err(&pdev->dev,
2911 "unable to read platform data tx fifo size\n");
2912
2913 mdwc->disable_host_mode_pm = of_property_read_bool(node,
2914 "qcom,disable-host-mode-pm");
2915
2916 dwc3_set_notifier(&dwc3_msm_notify_event);
2917
2918 /* Assumes dwc3 is the first DT child of dwc3-msm */
2919 dwc3_node = of_get_next_available_child(node, NULL);
2920 if (!dwc3_node) {
2921 dev_err(&pdev->dev, "failed to find dwc3 child\n");
2922 ret = -ENODEV;
2923 goto err;
2924 }
2925
2926 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2927 if (ret) {
2928 dev_err(&pdev->dev,
2929 "failed to add create dwc3 core\n");
2930 of_node_put(dwc3_node);
2931 goto err;
2932 }
2933
2934 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
2935 of_node_put(dwc3_node);
2936 if (!mdwc->dwc3) {
2937 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
2938 goto put_dwc3;
2939 }
2940
2941 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2942 "usb-phy", 0);
2943 if (IS_ERR(mdwc->hs_phy)) {
2944 dev_err(&pdev->dev, "unable to get hsphy device\n");
2945 ret = PTR_ERR(mdwc->hs_phy);
2946 goto put_dwc3;
2947 }
2948 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2949 "usb-phy", 1);
2950 if (IS_ERR(mdwc->ss_phy)) {
2951 dev_err(&pdev->dev, "unable to get ssphy device\n");
2952 ret = PTR_ERR(mdwc->ss_phy);
2953 goto put_dwc3;
2954 }
2955
2956 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
2957 if (mdwc->bus_scale_table) {
2958 mdwc->bus_perf_client =
2959 msm_bus_scale_register_client(mdwc->bus_scale_table);
2960 }
2961
2962 dwc = platform_get_drvdata(mdwc->dwc3);
2963 if (!dwc) {
2964 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
2965 goto put_dwc3;
2966 }
2967
2968 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
2969 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
2970
2971 if (cpu_to_affin)
2972 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
2973
Mayank Ranaf4918d32016-12-15 13:35:55 -08002974 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
2975 &mdwc->num_gsi_event_buffers);
2976
Mayank Rana511f3b22016-08-02 12:00:11 -07002977 /*
2978 * Clocks and regulators will not be turned on until the first time
2979 * runtime PM resume is called. This is to allow for booting up with
2980 * charger already connected so as not to disturb PHY line states.
2981 */
2982 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
2983 atomic_set(&dwc->in_lpm, 1);
2984 pm_runtime_set_suspended(mdwc->dev);
2985 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
2986 pm_runtime_use_autosuspend(mdwc->dev);
2987 pm_runtime_enable(mdwc->dev);
2988 device_init_wakeup(mdwc->dev, 1);
2989
2990 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
2991 pm_runtime_get_noresume(mdwc->dev);
2992
2993 ret = dwc3_msm_extcon_register(mdwc);
2994 if (ret)
2995 goto put_dwc3;
2996
2997 /* Update initial VBUS/ID state from extcon */
2998 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
2999 EXTCON_USB))
3000 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
3001 if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
3002 EXTCON_USB_HOST))
3003 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
3004
3005 device_create_file(&pdev->dev, &dev_attr_mode);
3006
3007 schedule_delayed_work(&mdwc->sm_work, 0);
3008
3009 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3010 if (!dwc->is_drd && host_mode) {
3011 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3012 mdwc->id_state = DWC3_ID_GROUND;
3013 dwc3_ext_event_notify(mdwc);
3014 }
3015
3016 return 0;
3017
3018put_dwc3:
3019 platform_device_put(mdwc->dwc3);
3020 if (mdwc->bus_perf_client)
3021 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3022err:
3023 return ret;
3024}
3025
/*
 * device_for_each_child() callback used by dwc3_msm_remove():
 * unregisters one child device. @data is unused.
 */
static int dwc3_msm_remove_children(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}
3031
/*
 * Platform driver remove: tear down in roughly the reverse order of probe.
 * Forces the hardware active (runtime PM or, failing that, manual clock
 * enables) before unregistering children, then releases PM references,
 * regulators, IRQs, clocks and finally the GDSC.
 */
static int dwc3_msm_remove(struct platform_device *pdev)
{
	struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
	int ret_pm;

	device_remove_file(&pdev->dev, &dev_attr_mode);

	if (cpu_to_affin)
		unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);

	/*
	 * In case of system suspend, pm_runtime_get_sync fails.
	 * Hence turn ON the clocks manually.
	 */
	ret_pm = pm_runtime_get_sync(mdwc->dev);
	if (ret_pm < 0) {
		dev_err(mdwc->dev,
			"pm_runtime_get_sync failed with %d\n", ret_pm);
		/* Manual enables mirror the resume path's clock set. */
		if (mdwc->noc_aggr_clk)
			clk_prepare_enable(mdwc->noc_aggr_clk);
		clk_prepare_enable(mdwc->utmi_clk);
		clk_prepare_enable(mdwc->core_clk);
		clk_prepare_enable(mdwc->iface_clk);
		clk_prepare_enable(mdwc->sleep_clk);
		if (mdwc->bus_aggr_clk)
			clk_prepare_enable(mdwc->bus_aggr_clk);
		clk_prepare_enable(mdwc->xo_clk);
	}

	/* Stop the OTG state machine before removing child devices. */
	cancel_delayed_work_sync(&mdwc->sm_work);

	if (mdwc->hs_phy)
		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
	/* Drop the reference taken via of_find_device_by_node() in probe. */
	platform_device_put(mdwc->dwc3);
	device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);

	pm_runtime_disable(mdwc->dev);
	pm_runtime_barrier(mdwc->dev);
	pm_runtime_put_sync(mdwc->dev);
	pm_runtime_set_suspended(mdwc->dev);
	device_wakeup_disable(mdwc->dev);

	if (mdwc->bus_perf_client)
		msm_bus_scale_unregister_client(mdwc->bus_perf_client);

	if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
		regulator_disable(mdwc->vbus_reg);

	disable_irq(mdwc->hs_phy_irq);
	if (mdwc->ss_phy_irq)
		disable_irq(mdwc->ss_phy_irq);
	disable_irq(mdwc->pwr_event_irq);

	/*
	 * NOTE(review): bus_aggr_clk/noc_aggr_clk enabled in the error path
	 * above are not disabled here — confirm this matches the intended
	 * clock ownership on this teardown path.
	 */
	clk_disable_unprepare(mdwc->utmi_clk);
	/* Park core_clk at its default 19.2 MHz rate before gating it. */
	clk_set_rate(mdwc->core_clk, 19200000);
	clk_disable_unprepare(mdwc->core_clk);
	clk_disable_unprepare(mdwc->iface_clk);
	clk_disable_unprepare(mdwc->sleep_clk);
	clk_disable_unprepare(mdwc->xo_clk);
	clk_put(mdwc->xo_clk);

	/* Finally collapse the USB GDSC power domain. */
	dwc3_msm_config_gdsc(mdwc, 0);

	return 0;
}
3097
3098#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3099
3100/**
3101 * dwc3_otg_start_host - helper function for starting/stoping the host
3102 * controller driver.
3103 *
3104 * @mdwc: Pointer to the dwc3_msm structure.
3105 * @on: start / stop the host controller driver.
3106 *
3107 * Returns 0 on success otherwise negative errno.
3108 */
3109static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
3110{
3111 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3112 int ret = 0;
3113
3114 if (!dwc->xhci)
3115 return -EINVAL;
3116
3117 /*
3118 * The vbus_reg pointer could have multiple values
3119 * NULL: regulator_get() hasn't been called, or was previously deferred
3120 * IS_ERR: regulator could not be obtained, so skip using it
3121 * Valid pointer otherwise
3122 */
3123 if (!mdwc->vbus_reg) {
3124 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
3125 "vbus_dwc3");
3126 if (IS_ERR(mdwc->vbus_reg) &&
3127 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
3128 /* regulators may not be ready, so retry again later */
3129 mdwc->vbus_reg = NULL;
3130 return -EPROBE_DEFER;
3131 }
3132 }
3133
3134 if (on) {
3135 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3136
Mayank Rana511f3b22016-08-02 12:00:11 -07003137 mdwc->hs_phy->flags |= PHY_HOST_MODE;
Hemant Kumarde1df692016-04-26 19:36:48 -07003138 if (dwc->maximum_speed == USB_SPEED_SUPER)
3139 mdwc->ss_phy->flags |= PHY_HOST_MODE;
3140
3141 pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003142 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3143 if (!IS_ERR(mdwc->vbus_reg))
3144 ret = regulator_enable(mdwc->vbus_reg);
3145 if (ret) {
3146 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3147 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3148 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3149 pm_runtime_put_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003150 return ret;
3151 }
3152
3153 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3154
3155 /*
3156 * FIXME If micro A cable is disconnected during system suspend,
3157 * xhci platform device will be removed before runtime pm is
3158 * enabled for xhci device. Due to this, disable_depth becomes
3159 * greater than one and runtimepm is not enabled for next microA
3160 * connect. Fix this by calling pm_runtime_init for xhci device.
3161 */
3162 pm_runtime_init(&dwc->xhci->dev);
3163 ret = platform_device_add(dwc->xhci);
3164 if (ret) {
3165 dev_err(mdwc->dev,
3166 "%s: failed to add XHCI pdev ret=%d\n",
3167 __func__, ret);
3168 if (!IS_ERR(mdwc->vbus_reg))
3169 regulator_disable(mdwc->vbus_reg);
3170 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3171 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3172 pm_runtime_put_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003173 return ret;
3174 }
3175
3176 /*
3177 * In some cases it is observed that USB PHY is not going into
3178 * suspend with host mode suspend functionality. Hence disable
3179 * XHCI's runtime PM here if disable_host_mode_pm is set.
3180 */
3181 if (mdwc->disable_host_mode_pm)
3182 pm_runtime_disable(&dwc->xhci->dev);
3183
3184 mdwc->in_host_mode = true;
3185 dwc3_usb3_phy_suspend(dwc, true);
3186
3187 /* xHCI should have incremented child count as necessary */
Mayank Rana511f3b22016-08-02 12:00:11 -07003188 pm_runtime_mark_last_busy(mdwc->dev);
3189 pm_runtime_put_sync_autosuspend(mdwc->dev);
3190 } else {
3191 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3192
3193 if (!IS_ERR(mdwc->vbus_reg))
3194 ret = regulator_disable(mdwc->vbus_reg);
3195 if (ret) {
3196 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3197 return ret;
3198 }
3199
3200 pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003201 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3202 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3203 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3204 platform_device_del(dwc->xhci);
3205
3206 /*
3207 * Perform USB hardware RESET (both core reset and DBM reset)
3208 * when moving from host to peripheral. This is required for
3209 * peripheral mode to work.
3210 */
3211 dwc3_msm_block_reset(mdwc, true);
3212
3213 dwc3_usb3_phy_suspend(dwc, false);
3214 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3215
3216 mdwc->in_host_mode = false;
3217
3218 /* re-init core and OTG registers as block reset clears these */
3219 dwc3_post_host_reset_core_init(dwc);
3220 pm_runtime_mark_last_busy(mdwc->dev);
3221 pm_runtime_put_sync_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003222 }
3223
3224 return 0;
3225}
3226
3227static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3228{
3229 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3230
3231 /* Update OTG VBUS Valid from HSPHY to controller */
3232 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3233 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3234 UTMI_OTG_VBUS_VALID,
3235 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3236
3237 /* Update only if Super Speed is supported */
3238 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3239 /* Update VBUS Valid from SSPHY to controller */
3240 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3241 LANE0_PWR_PRESENT,
3242 vbus_present ? LANE0_PWR_PRESENT : 0);
3243 }
3244}
3245
3246/**
3247 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3248 *
3249 * @mdwc: Pointer to the dwc3_msm structure.
3250 * @on: Turn ON/OFF the gadget.
3251 *
3252 * Returns 0 on success otherwise negative errno.
3253 */
3254static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3255{
3256 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3257
3258 pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003259
3260 if (on) {
3261 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3262 __func__, dwc->gadget.name);
3263
3264 dwc3_override_vbus_status(mdwc, true);
3265 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3266 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3267
3268 /*
3269 * Core reset is not required during start peripheral. Only
3270 * DBM reset is required, hence perform only DBM reset here.
3271 */
3272 dwc3_msm_block_reset(mdwc, false);
3273
3274 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3275 usb_gadget_vbus_connect(&dwc->gadget);
3276 } else {
3277 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3278 __func__, dwc->gadget.name);
3279 usb_gadget_vbus_disconnect(&dwc->gadget);
3280 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3281 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3282 dwc3_override_vbus_status(mdwc, false);
3283 dwc3_usb3_phy_suspend(dwc, false);
3284 }
3285
3286 pm_runtime_put_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003287
3288 return 0;
3289}
3290
3291static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3292{
Jack Pham8caff352016-08-19 16:33:55 -07003293 union power_supply_propval pval = {0};
Jack Phamd72bafe2016-08-09 11:07:22 -07003294 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07003295
3296 if (mdwc->charging_disabled)
3297 return 0;
3298
3299 if (mdwc->max_power == mA)
3300 return 0;
3301
3302 if (!mdwc->usb_psy) {
3303 mdwc->usb_psy = power_supply_get_by_name("usb");
3304 if (!mdwc->usb_psy) {
3305 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3306 return -ENODEV;
3307 }
3308 }
3309
Jack Pham8caff352016-08-19 16:33:55 -07003310 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
3311 if (pval.intval != POWER_SUPPLY_TYPE_USB)
3312 return 0;
3313
Mayank Rana511f3b22016-08-02 12:00:11 -07003314 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3315
Mayank Rana511f3b22016-08-02 12:00:11 -07003316 /* Set max current limit in uA */
Jack Pham8caff352016-08-19 16:33:55 -07003317 pval.intval = 1000 * mA;
Jack Phamd72bafe2016-08-09 11:07:22 -07003318 ret = power_supply_set_property(mdwc->usb_psy,
3319 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3320 if (ret) {
3321 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3322 return ret;
3323 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003324
3325 mdwc->max_power = mA;
3326 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003327}
3328
3329
3330/**
3331 * dwc3_otg_sm_work - workqueue function.
3332 *
3333 * @w: Pointer to the dwc3 otg workqueue
3334 *
3335 * NOTE: After any change in otg_state, we must reschdule the state machine.
3336 */
3337static void dwc3_otg_sm_work(struct work_struct *w)
3338{
3339 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3340 struct dwc3 *dwc = NULL;
3341 bool work = 0;
3342 int ret = 0;
3343 unsigned long delay = 0;
3344 const char *state;
3345
3346 if (mdwc->dwc3)
3347 dwc = platform_get_drvdata(mdwc->dwc3);
3348
3349 if (!dwc) {
3350 dev_err(mdwc->dev, "dwc is NULL.\n");
3351 return;
3352 }
3353
3354 state = usb_otg_state_string(mdwc->otg_state);
3355 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana511f3b22016-08-02 12:00:11 -07003356
3357 /* Check OTG state */
3358 switch (mdwc->otg_state) {
3359 case OTG_STATE_UNDEFINED:
3360 /* Do nothing if no cable connected */
3361 if (test_bit(ID, &mdwc->inputs) &&
3362 !test_bit(B_SESS_VLD, &mdwc->inputs))
3363 break;
3364
Mayank Rana511f3b22016-08-02 12:00:11 -07003365 mdwc->otg_state = OTG_STATE_B_IDLE;
3366 /* fall-through */
3367 case OTG_STATE_B_IDLE:
3368 if (!test_bit(ID, &mdwc->inputs)) {
3369 dev_dbg(mdwc->dev, "!id\n");
3370 mdwc->otg_state = OTG_STATE_A_IDLE;
3371 work = 1;
3372 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
3373 dev_dbg(mdwc->dev, "b_sess_vld\n");
3374 /*
3375 * Increment pm usage count upon cable connect. Count
3376 * is decremented in OTG_STATE_B_PERIPHERAL state on
3377 * cable disconnect or in bus suspend.
3378 */
3379 pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003380 dwc3_otg_start_peripheral(mdwc, 1);
3381 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3382 work = 1;
3383 } else {
3384 dwc3_msm_gadget_vbus_draw(mdwc, 0);
3385 dev_dbg(mdwc->dev, "Cable disconnected\n");
3386 }
3387 break;
3388
3389 case OTG_STATE_B_PERIPHERAL:
3390 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
3391 !test_bit(ID, &mdwc->inputs)) {
3392 dev_dbg(mdwc->dev, "!id || !bsv\n");
3393 mdwc->otg_state = OTG_STATE_B_IDLE;
3394 dwc3_otg_start_peripheral(mdwc, 0);
3395 /*
3396 * Decrement pm usage count upon cable disconnect
3397 * which was incremented upon cable connect in
3398 * OTG_STATE_B_IDLE state
3399 */
3400 pm_runtime_put_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003401 work = 1;
3402 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
3403 test_bit(B_SESS_VLD, &mdwc->inputs)) {
3404 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
3405 mdwc->otg_state = OTG_STATE_B_SUSPEND;
3406 /*
3407 * Decrement pm usage count upon bus suspend.
3408 * Count was incremented either upon cable
3409 * connect in OTG_STATE_B_IDLE or host
3410 * initiated resume after bus suspend in
3411 * OTG_STATE_B_SUSPEND state
3412 */
3413 pm_runtime_mark_last_busy(mdwc->dev);
3414 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003415 }
3416 break;
3417
3418 case OTG_STATE_B_SUSPEND:
3419 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
3420 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
3421 mdwc->otg_state = OTG_STATE_B_IDLE;
3422 dwc3_otg_start_peripheral(mdwc, 0);
3423 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
3424 dev_dbg(mdwc->dev, "BSUSP !susp\n");
3425 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3426 /*
3427 * Increment pm usage count upon host
3428 * initiated resume. Count was decremented
3429 * upon bus suspend in
3430 * OTG_STATE_B_PERIPHERAL state.
3431 */
3432 pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003433 }
3434 break;
3435
3436 case OTG_STATE_A_IDLE:
3437 /* Switch to A-Device*/
3438 if (test_bit(ID, &mdwc->inputs)) {
3439 dev_dbg(mdwc->dev, "id\n");
3440 mdwc->otg_state = OTG_STATE_B_IDLE;
3441 mdwc->vbus_retry_count = 0;
3442 work = 1;
3443 } else {
3444 mdwc->otg_state = OTG_STATE_A_HOST;
3445 ret = dwc3_otg_start_host(mdwc, 1);
3446 if ((ret == -EPROBE_DEFER) &&
3447 mdwc->vbus_retry_count < 3) {
3448 /*
3449 * Get regulator failed as regulator driver is
3450 * not up yet. Will try to start host after 1sec
3451 */
3452 mdwc->otg_state = OTG_STATE_A_IDLE;
3453 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
3454 delay = VBUS_REG_CHECK_DELAY;
3455 work = 1;
3456 mdwc->vbus_retry_count++;
3457 } else if (ret) {
3458 dev_err(mdwc->dev, "unable to start host\n");
3459 mdwc->otg_state = OTG_STATE_A_IDLE;
3460 goto ret;
3461 }
3462 }
3463 break;
3464
3465 case OTG_STATE_A_HOST:
3466 if (test_bit(ID, &mdwc->inputs)) {
3467 dev_dbg(mdwc->dev, "id\n");
3468 dwc3_otg_start_host(mdwc, 0);
3469 mdwc->otg_state = OTG_STATE_B_IDLE;
3470 mdwc->vbus_retry_count = 0;
3471 work = 1;
3472 } else {
3473 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003474 if (dwc)
3475 pm_runtime_resume(&dwc->xhci->dev);
3476 }
3477 break;
3478
3479 default:
3480 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
3481
3482 }
3483
3484 if (work)
3485 schedule_delayed_work(&mdwc->sm_work, delay);
3486
3487ret:
3488 return;
3489}
3490
3491#ifdef CONFIG_PM_SLEEP
3492static int dwc3_msm_pm_suspend(struct device *dev)
3493{
3494 int ret = 0;
3495 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3496 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3497
3498 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003499
3500 flush_workqueue(mdwc->dwc3_wq);
3501 if (!atomic_read(&dwc->in_lpm)) {
3502 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
3503 return -EBUSY;
3504 }
3505
3506 ret = dwc3_msm_suspend(mdwc);
3507 if (!ret)
3508 atomic_set(&mdwc->pm_suspended, 1);
3509
3510 return ret;
3511}
3512
3513static int dwc3_msm_pm_resume(struct device *dev)
3514{
3515 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3516
3517 dev_dbg(dev, "dwc3-msm PM resume\n");
3518
Mayank Rana511f3b22016-08-02 12:00:11 -07003519 /* flush to avoid race in read/write of pm_suspended */
3520 flush_workqueue(mdwc->dwc3_wq);
3521 atomic_set(&mdwc->pm_suspended, 0);
3522
3523 /* kick in otg state machine */
3524 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
3525
3526 return 0;
3527}
3528#endif
3529
3530#ifdef CONFIG_PM
/*
 * Runtime PM idle callback: nothing to do, returning 0 lets the PM core
 * proceed to runtime suspend.
 */
static int dwc3_msm_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "DWC3-msm runtime idle\n");

	return 0;
}
3537
/* Runtime PM suspend callback: delegate to the common suspend path. */
static int dwc3_msm_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "DWC3-msm runtime suspend\n");

	return dwc3_msm_suspend(dev_get_drvdata(dev));
}
3546
/* Runtime PM resume callback: delegate to the common resume path. */
static int dwc3_msm_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "DWC3-msm runtime resume\n");

	return dwc3_msm_resume(dev_get_drvdata(dev));
}
3555#endif
3556
/* System-sleep and runtime PM callbacks for the dwc3-msm glue device. */
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
				dwc3_msm_runtime_idle)
};
3562
3563static const struct of_device_id of_dwc3_matach[] = {
3564 {
3565 .compatible = "qcom,dwc-usb3-msm",
3566 },
3567 { },
3568};
3569MODULE_DEVICE_TABLE(of, of_dwc3_matach);
3570
3571static struct platform_driver dwc3_msm_driver = {
3572 .probe = dwc3_msm_probe,
3573 .remove = dwc3_msm_remove,
3574 .driver = {
3575 .name = "msm-dwc3",
3576 .pm = &dwc3_msm_dev_pm_ops,
3577 .of_match_table = of_dwc3_matach,
3578 },
3579};
3580
3581MODULE_LICENSE("GPL v2");
3582MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
3583
3584static int dwc3_msm_init(void)
3585{
3586 return platform_driver_register(&dwc3_msm_driver);
3587}
3588module_init(dwc3_msm_init);
3589
/* Module exit point: unregister the platform driver. */
static void __exit dwc3_msm_exit(void)
{
	platform_driver_unregister(&dwc3_msm_driver);
}
module_exit(dwc3_msm_exit);