/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13/*
14 * MSM PCIe controller driver.
15 */
16
17#include <linux/module.h>
18#include <linux/bitops.h>
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
22#include <linux/gpio.h>
23#include <linux/iopoll.h>
24#include <linux/kernel.h>
25#include <linux/of_pci.h>
26#include <linux/pci.h>
27#include <linux/platform_device.h>
28#include <linux/regulator/consumer.h>
29#include <linux/regulator/rpm-smd-regulator.h>
30#include <linux/slab.h>
31#include <linux/types.h>
32#include <linux/of_gpio.h>
33#include <linux/clk/msm-clk.h>
34#include <linux/reset.h>
35#include <linux/msm-bus.h>
36#include <linux/msm-bus-board.h>
37#include <linux/debugfs.h>
38#include <linux/uaccess.h>
39#include <linux/io.h>
40#include <linux/msi.h>
41#include <linux/interrupt.h>
42#include <linux/irq.h>
43#include <linux/irqdomain.h>
44#include <linux/pm_wakeup.h>
45#include <linux/compiler.h>
46#include <soc/qcom/scm.h>
47#include <linux/ipc_logging.h>
48#include <linux/msm_pcie.h>
49
50#ifdef CONFIG_ARCH_MDMCALIFORNIUM
51#define PCIE_VENDOR_ID_RCP 0x17cb
52#define PCIE_DEVICE_ID_RCP 0x0302
53
54#define PCIE20_L1SUB_CONTROL1 0x158
55#define PCIE20_PARF_DBI_BASE_ADDR 0x350
56#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
57
58#define TX_BASE 0x200
59#define RX_BASE 0x400
60#define PCS_BASE 0x800
61#define PCS_MISC_BASE 0x600
62
63#elif defined(CONFIG_ARCH_MSM8998)
64#define PCIE_VENDOR_ID_RCP 0x17cb
65#define PCIE_DEVICE_ID_RCP 0x0105
66
67#define PCIE20_L1SUB_CONTROL1 0x1E4
68#define PCIE20_PARF_DBI_BASE_ADDR 0x350
69#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
70
71#define TX_BASE 0
72#define RX_BASE 0
73#define PCS_BASE 0x800
74#define PCS_MISC_BASE 0
75
76#else
77#define PCIE_VENDOR_ID_RCP 0x17cb
78#define PCIE_DEVICE_ID_RCP 0x0104
79
80#define PCIE20_L1SUB_CONTROL1 0x158
81#define PCIE20_PARF_DBI_BASE_ADDR 0x168
82#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
83
84#define TX_BASE 0x1000
85#define RX_BASE 0x1200
86#define PCS_BASE 0x1400
87#define PCS_MISC_BASE 0
88#endif
89
90#define TX(n, m) (TX_BASE + n * m * 0x1000)
91#define RX(n, m) (RX_BASE + n * m * 0x1000)
92#define PCS_PORT(n, m) (PCS_BASE + n * m * 0x1000)
93#define PCS_MISC_PORT(n, m) (PCS_MISC_BASE + n * m * 0x1000)
94
95#define QSERDES_COM_BG_TIMER 0x00C
96#define QSERDES_COM_SSC_EN_CENTER 0x010
97#define QSERDES_COM_SSC_ADJ_PER1 0x014
98#define QSERDES_COM_SSC_ADJ_PER2 0x018
99#define QSERDES_COM_SSC_PER1 0x01C
100#define QSERDES_COM_SSC_PER2 0x020
101#define QSERDES_COM_SSC_STEP_SIZE1 0x024
102#define QSERDES_COM_SSC_STEP_SIZE2 0x028
103#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
104#define QSERDES_COM_CLK_ENABLE1 0x038
105#define QSERDES_COM_SYS_CLK_CTRL 0x03C
106#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
107#define QSERDES_COM_PLL_IVCO 0x048
108#define QSERDES_COM_LOCK_CMP1_MODE0 0x04C
109#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
110#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
111#define QSERDES_COM_BG_TRIM 0x070
112#define QSERDES_COM_CLK_EP_DIV 0x074
113#define QSERDES_COM_CP_CTRL_MODE0 0x078
114#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
115#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
116#define QSERDES_COM_SYSCLK_EN_SEL 0x0AC
117#define QSERDES_COM_RESETSM_CNTRL 0x0B4
118#define QSERDES_COM_RESTRIM_CTRL 0x0BC
119#define QSERDES_COM_RESCODE_DIV_NUM 0x0C4
120#define QSERDES_COM_LOCK_CMP_EN 0x0C8
121#define QSERDES_COM_DEC_START_MODE0 0x0D0
122#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0DC
123#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0E0
124#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0E4
125#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
126#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10C
127#define QSERDES_COM_VCO_TUNE_CTRL 0x124
128#define QSERDES_COM_VCO_TUNE_MAP 0x128
129#define QSERDES_COM_VCO_TUNE1_MODE0 0x12C
130#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
131#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
132#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
133#define QSERDES_COM_BG_CTRL 0x170
134#define QSERDES_COM_CLK_SELECT 0x174
135#define QSERDES_COM_HSCLK_SEL 0x178
136#define QSERDES_COM_CORECLK_DIV 0x184
137#define QSERDES_COM_CORE_CLK_EN 0x18C
138#define QSERDES_COM_C_READY_STATUS 0x190
139#define QSERDES_COM_CMN_CONFIG 0x194
140#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19C
141#define QSERDES_COM_DEBUG_BUS0 0x1A0
142#define QSERDES_COM_DEBUG_BUS1 0x1A4
143#define QSERDES_COM_DEBUG_BUS2 0x1A8
144#define QSERDES_COM_DEBUG_BUS3 0x1AC
145#define QSERDES_COM_DEBUG_BUS_SEL 0x1B0
146
147#define QSERDES_TX_N_RES_CODE_LANE_OFFSET(n, m) (TX(n, m) + 0x4C)
148#define QSERDES_TX_N_DEBUG_BUS_SEL(n, m) (TX(n, m) + 0x64)
149#define QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(n, m) (TX(n, m) + 0x68)
150#define QSERDES_TX_N_LANE_MODE(n, m) (TX(n, m) + 0x94)
151#define QSERDES_TX_N_RCV_DETECT_LVL_2(n, m) (TX(n, m) + 0xAC)
152
153#define QSERDES_RX_N_UCDR_SO_GAIN_HALF(n, m) (RX(n, m) + 0x010)
154#define QSERDES_RX_N_UCDR_SO_GAIN(n, m) (RX(n, m) + 0x01C)
155#define QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(n, m) (RX(n, m) + 0x048)
156#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(n, m) (RX(n, m) + 0x0D8)
157#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(n, m) (RX(n, m) + 0x0DC)
158#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(n, m) (RX(n, m) + 0x0E0)
159#define QSERDES_RX_N_SIGDET_ENABLES(n, m) (RX(n, m) + 0x110)
160#define QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(n, m) (RX(n, m) + 0x11C)
161#define QSERDES_RX_N_SIGDET_LVL(n, m) (RX(n, m) + 0x118)
162#define QSERDES_RX_N_RX_BAND(n, m) (RX(n, m) + 0x120)
163
164#define PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x00)
165#define PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x04)
166#define PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x08)
167#define PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x0C)
168#define PCIE_MISC_N_DEBUG_BUS_0_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x14)
169#define PCIE_MISC_N_DEBUG_BUS_1_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x18)
170#define PCIE_MISC_N_DEBUG_BUS_2_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x1C)
171#define PCIE_MISC_N_DEBUG_BUS_3_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x20)
172
173#define PCIE_N_SW_RESET(n, m) (PCS_PORT(n, m) + 0x00)
174#define PCIE_N_POWER_DOWN_CONTROL(n, m) (PCS_PORT(n, m) + 0x04)
175#define PCIE_N_START_CONTROL(n, m) (PCS_PORT(n, m) + 0x08)
176#define PCIE_N_TXDEEMPH_M6DB_V0(n, m) (PCS_PORT(n, m) + 0x24)
177#define PCIE_N_TXDEEMPH_M3P5DB_V0(n, m) (PCS_PORT(n, m) + 0x28)
178#define PCIE_N_ENDPOINT_REFCLK_DRIVE(n, m) (PCS_PORT(n, m) + 0x54)
179#define PCIE_N_RX_IDLE_DTCT_CNTRL(n, m) (PCS_PORT(n, m) + 0x58)
180#define PCIE_N_POWER_STATE_CONFIG1(n, m) (PCS_PORT(n, m) + 0x60)
181#define PCIE_N_POWER_STATE_CONFIG4(n, m) (PCS_PORT(n, m) + 0x6C)
182#define PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA0)
183#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA4)
184#define PCIE_N_PLL_LOCK_CHK_DLY_TIME(n, m) (PCS_PORT(n, m) + 0xA8)
185#define PCIE_N_TEST_CONTROL4(n, m) (PCS_PORT(n, m) + 0x11C)
186#define PCIE_N_TEST_CONTROL5(n, m) (PCS_PORT(n, m) + 0x120)
187#define PCIE_N_TEST_CONTROL6(n, m) (PCS_PORT(n, m) + 0x124)
188#define PCIE_N_TEST_CONTROL7(n, m) (PCS_PORT(n, m) + 0x128)
189#define PCIE_N_PCS_STATUS(n, m) (PCS_PORT(n, m) + 0x174)
190#define PCIE_N_DEBUG_BUS_0_STATUS(n, m) (PCS_PORT(n, m) + 0x198)
191#define PCIE_N_DEBUG_BUS_1_STATUS(n, m) (PCS_PORT(n, m) + 0x19C)
192#define PCIE_N_DEBUG_BUS_2_STATUS(n, m) (PCS_PORT(n, m) + 0x1A0)
193#define PCIE_N_DEBUG_BUS_3_STATUS(n, m) (PCS_PORT(n, m) + 0x1A4)
194#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1A8)
195#define PCIE_N_OSC_DTCT_ACTIONS(n, m) (PCS_PORT(n, m) + 0x1AC)
196#define PCIE_N_SIGDET_CNTRL(n, m) (PCS_PORT(n, m) + 0x1B0)
197#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(n, m) (PCS_PORT(n, m) + 0x1DC)
198#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1E0)
199
200#define PCIE_COM_SW_RESET 0x400
201#define PCIE_COM_POWER_DOWN_CONTROL 0x404
202#define PCIE_COM_START_CONTROL 0x408
203#define PCIE_COM_DEBUG_BUS_BYTE0_INDEX 0x438
204#define PCIE_COM_DEBUG_BUS_BYTE1_INDEX 0x43C
205#define PCIE_COM_DEBUG_BUS_BYTE2_INDEX 0x440
206#define PCIE_COM_DEBUG_BUS_BYTE3_INDEX 0x444
207#define PCIE_COM_PCS_READY_STATUS 0x448
208#define PCIE_COM_DEBUG_BUS_0_STATUS 0x45C
209#define PCIE_COM_DEBUG_BUS_1_STATUS 0x460
210#define PCIE_COM_DEBUG_BUS_2_STATUS 0x464
211#define PCIE_COM_DEBUG_BUS_3_STATUS 0x468
212
213#define PCIE20_PARF_SYS_CTRL 0x00
214#define PCIE20_PARF_PM_STTS 0x24
215#define PCIE20_PARF_PCS_DEEMPH 0x34
216#define PCIE20_PARF_PCS_SWING 0x38
217#define PCIE20_PARF_PHY_CTRL 0x40
218#define PCIE20_PARF_PHY_REFCLK 0x4C
219#define PCIE20_PARF_CONFIG_BITS 0x50
220#define PCIE20_PARF_TEST_BUS 0xE4
221#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
222#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8
223#define PCIE20_PARF_LTSSM 0x1B0
224#define PCIE20_PARF_INT_ALL_STATUS 0x224
225#define PCIE20_PARF_INT_ALL_CLEAR 0x228
226#define PCIE20_PARF_INT_ALL_MASK 0x22C
227#define PCIE20_PARF_SID_OFFSET 0x234
228#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
229#define PCIE20_PARF_BDF_TRANSLATE_N 0x250
230
231#define PCIE20_ELBI_VERSION 0x00
232#define PCIE20_ELBI_SYS_CTRL 0x04
233#define PCIE20_ELBI_SYS_STTS 0x08
234
235#define PCIE20_CAP 0x70
236#define PCIE20_CAP_DEVCTRLSTATUS (PCIE20_CAP + 0x08)
237#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
238
239#define PCIE20_COMMAND_STATUS 0x04
240#define PCIE20_HEADER_TYPE 0x0C
241#define PCIE20_BUSNUMBERS 0x18
242#define PCIE20_MEMORY_BASE_LIMIT 0x20
243#define PCIE20_BRIDGE_CTRL 0x3C
244#define PCIE20_DEVICE_CONTROL_STATUS 0x78
245#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
246
247#define PCIE20_AUX_CLK_FREQ_REG 0xB40
248#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C
249#define PCIE20_ACK_N_FTS 0xff00
250
251#define PCIE20_PLR_IATU_VIEWPORT 0x900
252#define PCIE20_PLR_IATU_CTRL1 0x904
253#define PCIE20_PLR_IATU_CTRL2 0x908
254#define PCIE20_PLR_IATU_LBAR 0x90C
255#define PCIE20_PLR_IATU_UBAR 0x910
256#define PCIE20_PLR_IATU_LAR 0x914
257#define PCIE20_PLR_IATU_LTAR 0x918
258#define PCIE20_PLR_IATU_UTAR 0x91c
259
260#define PCIE20_CTRL1_TYPE_CFG0 0x04
261#define PCIE20_CTRL1_TYPE_CFG1 0x05
262
263#define PCIE20_CAP_ID 0x10
264#define L1SUB_CAP_ID 0x1E
265
266#define PCIE_CAP_PTR_OFFSET 0x34
267#define PCIE_EXT_CAP_OFFSET 0x100
268
269#define PCIE20_AER_UNCORR_ERR_STATUS_REG 0x104
270#define PCIE20_AER_CORR_ERR_STATUS_REG 0x110
271#define PCIE20_AER_ROOT_ERR_STATUS_REG 0x130
272#define PCIE20_AER_ERR_SRC_ID_REG 0x134
273
274#define RD 0
275#define WR 1
276#define MSM_PCIE_ERROR -1
277
278#define PERST_PROPAGATION_DELAY_US_MIN 1000
279#define PERST_PROPAGATION_DELAY_US_MAX 1005
280#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
281#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
282#define LINK_UP_TIMEOUT_US_MIN 5000
283#define LINK_UP_TIMEOUT_US_MAX 5100
284#define LINK_UP_CHECK_MAX_COUNT 20
285#define PHY_STABILIZATION_DELAY_US_MIN 995
286#define PHY_STABILIZATION_DELAY_US_MAX 1005
287#define POWER_DOWN_DELAY_US_MIN 10
288#define POWER_DOWN_DELAY_US_MAX 11
289#define LINKDOWN_INIT_WAITING_US_MIN 995
290#define LINKDOWN_INIT_WAITING_US_MAX 1005
291#define LINKDOWN_WAITING_US_MIN 4900
292#define LINKDOWN_WAITING_US_MAX 5100
293#define LINKDOWN_WAITING_COUNT 200
294
295#define PHY_READY_TIMEOUT_COUNT 10
296#define XMLH_LINK_UP 0x400
297#define MAX_LINK_RETRIES 5
298#define MAX_BUS_NUM 3
299#define MAX_PROP_SIZE 32
300#define MAX_RC_NAME_LEN 15
301#define MSM_PCIE_MAX_VREG 4
302#define MSM_PCIE_MAX_CLK 9
303#define MSM_PCIE_MAX_PIPE_CLK 1
304#define MAX_RC_NUM 3
305#define MAX_DEVICE_NUM 20
306#define MAX_SHORT_BDF_NUM 16
307#define PCIE_TLP_RD_SIZE 0x5
308#define PCIE_MSI_NR_IRQS 256
309#define MSM_PCIE_MAX_MSI 32
310#define MAX_MSG_LEN 80
311#define PCIE_LOG_PAGES (50)
312#define PCIE_CONF_SPACE_DW 1024
313#define PCIE_CLEAR 0xDEADBEEF
314#define PCIE_LINK_DOWN 0xFFFFFFFF
315
316#define MSM_PCIE_MAX_RESET 4
317#define MSM_PCIE_MAX_PIPE_RESET 1
318
319#define MSM_PCIE_MSI_PHY 0xa0000000
320#define PCIE20_MSI_CTRL_ADDR (0x820)
321#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
322#define PCIE20_MSI_CTRL_INTR_EN (0x828)
323#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
324#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
325#define PCIE20_MSI_CTRL_MAX 8
326
327/* PM control options */
328#define PM_IRQ 0x1
329#define PM_CLK 0x2
330#define PM_GPIO 0x4
331#define PM_VREG 0x8
332#define PM_PIPE_CLK 0x10
333#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)
334
/*
 * Split a physical address into the 32-bit halves programmed into the
 * MSI/iATU address registers.  On 32-bit phys_addr_t builds the upper
 * half is always zero.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
#else
#define PCIE_UPPER_ADDR(addr) (0x0)
#endif
#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
341
/* Config Space Offsets */
/*
 * Build the bus/device/function selector used when targeting config space:
 * bus number in bits [31:24], devfn in bits [23:16].
 * Arguments are fully parenthesized so that expression arguments
 * (e.g. BDF_OFFSET(a | b, fn)) expand correctly; the original form
 * "(bus << 24)" would bind the shift before lower-precedence operators
 * inside the argument.
 */
#define BDF_OFFSET(bus, devfn) \
	(((bus) << 24) | ((devfn) << 16))
345
/* Generic debug print: console only, gated by the debug_mask module param. */
#define PCIE_GEN_DBG(x...) do { \
	if (msm_pcie_debug_mask) \
		pr_alert(x); \
	} while (0)

/*
 * Primary debug log: goes to both IPC log buffers (long + normal) and,
 * when debug_mask is set, to the console.
 */
#define PCIE_DBG(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_long) \
		ipc_log_string((dev)->ipc_log_long, \
			"DBG1:%s: " fmt, __func__, arg); \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
	if (msm_pcie_debug_mask) \
		pr_alert("%s: " fmt, __func__, arg); \
	} while (0)

/* Secondary debug log: normal IPC log buffer only (+ optional console). */
#define PCIE_DBG2(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
	if (msm_pcie_debug_mask) \
		pr_alert("%s: " fmt, __func__, arg); \
	} while (0)

/* Tertiary debug log: normal IPC log buffer only (+ optional console). */
#define PCIE_DBG3(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
	if (msm_pcie_debug_mask) \
		pr_alert("%s: " fmt, __func__, arg); \
	} while (0)

/* Register-dump output: dump IPC log buffer only, never the console. */
#define PCIE_DUMP(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_dump) \
		ipc_log_string((dev)->ipc_log_dump, \
			"DUMP:%s: " fmt, __func__, arg); \
	} while (0)

/* debugfs-triggered output: dump IPC log buffer plus unconditional console. */
#define PCIE_DBG_FS(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_dump) \
		ipc_log_string((dev)->ipc_log_dump, \
			"DBG_FS:%s: " fmt, __func__, arg); \
	pr_alert("%s: " fmt, __func__, arg); \
	} while (0)

/* Informational: both IPC log buffers plus unconditional pr_info(). */
#define PCIE_INFO(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_long) \
		ipc_log_string((dev)->ipc_log_long, \
			"INFO:%s: " fmt, __func__, arg); \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
	pr_info("%s: " fmt, __func__, arg); \
	} while (0)

/* Error: both IPC log buffers plus unconditional pr_err(). */
#define PCIE_ERR(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_long) \
		ipc_log_string((dev)->ipc_log_long, \
			"ERR:%s: " fmt, __func__, arg); \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
	pr_err("%s: " fmt, __func__, arg); \
	} while (0)
405
406
/*
 * Indices into msm_pcie_dev_t.res[]; order must match the resource-name
 * table msm_pcie_res_info[] below.
 */
enum msm_pcie_res {
	MSM_PCIE_RES_PARF,	/* "parf" wrapper registers */
	MSM_PCIE_RES_PHY,	/* "phy" registers */
	MSM_PCIE_RES_DM_CORE,	/* "dm_core" DesignWare core space */
	MSM_PCIE_RES_ELBI,	/* "elbi" external local bus interface */
	MSM_PCIE_RES_CONF,	/* "conf" config-access window */
	MSM_PCIE_RES_IO,	/* "io" window */
	MSM_PCIE_RES_BARS,	/* "bars" memory window */
	MSM_PCIE_RES_TCSR,	/* "tcsr" top-level CSR */
	MSM_PCIE_MAX_RES,
};
418
/*
 * Indices into msm_pcie_dev_t.irq[]; order must match the IRQ-name table
 * msm_pcie_irq_info[] below.
 */
enum msm_pcie_irq {
	MSM_PCIE_INT_MSI,
	MSM_PCIE_INT_A,
	MSM_PCIE_INT_B,
	MSM_PCIE_INT_C,
	MSM_PCIE_INT_D,
	MSM_PCIE_INT_PLS_PME,
	MSM_PCIE_INT_PME_LEGACY,
	MSM_PCIE_INT_PLS_ERR,
	MSM_PCIE_INT_AER_LEGACY,
	MSM_PCIE_INT_LINK_UP,
	MSM_PCIE_INT_LINK_DOWN,
	MSM_PCIE_INT_BRIDGE_FLUSH_N,
	MSM_PCIE_INT_GLOBAL_INT,
	MSM_PCIE_MAX_IRQ,
};
435
/*
 * Events delivered through the aggregated "global" interrupt.  Numbering
 * starts at 1; each value presumably corresponds to a bit position in
 * PCIE20_PARF_INT_ALL_STATUS — TODO confirm against the HW programming guide.
 */
enum msm_pcie_irq_event {
	MSM_PCIE_INT_EVT_LINK_DOWN = 1,
	MSM_PCIE_INT_EVT_BME,
	MSM_PCIE_INT_EVT_PM_TURNOFF,
	MSM_PCIE_INT_EVT_DEBUG,
	MSM_PCIE_INT_EVT_LTR,
	MSM_PCIE_INT_EVT_MHI_Q6,
	MSM_PCIE_INT_EVT_MHI_A7,
	MSM_PCIE_INT_EVT_DSTATE_CHANGE,
	MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
	MSM_PCIE_INT_EVT_MMIO_WRITE,
	MSM_PCIE_INT_EVT_CFG_WRITE,
	MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
	MSM_PCIE_INT_EVT_LINK_UP,
	MSM_PCIE_INT_EVT_AER_LEGACY,
	MSM_PCIE_INT_EVT_AER_ERR,
	MSM_PCIE_INT_EVT_PME_LEGACY,
	MSM_PCIE_INT_EVT_PLS_PME,
	MSM_PCIE_INT_EVT_INTD,
	MSM_PCIE_INT_EVT_INTC,
	MSM_PCIE_INT_EVT_INTB,
	MSM_PCIE_INT_EVT_INTA,
	MSM_PCIE_INT_EVT_EDMA,
	MSM_PCIE_INT_EVT_MSI_0,
	MSM_PCIE_INT_EVT_MSI_1,
	MSM_PCIE_INT_EVT_MSI_2,
	MSM_PCIE_INT_EVT_MSI_3,
	MSM_PCIE_INT_EVT_MSI_4,
	MSM_PCIE_INT_EVT_MSI_5,
	MSM_PCIE_INT_EVT_MSI_6,
	MSM_PCIE_INT_EVT_MSI_7,
	MSM_PCIE_INT_EVT_MAX = 30,
};
469
/*
 * Indices into msm_pcie_dev_t.gpio[]; order must match the GPIO table
 * msm_pcie_gpio_info[] below.
 */
enum msm_pcie_gpio {
	MSM_PCIE_GPIO_PERST,	/* PERST# to the endpoint */
	MSM_PCIE_GPIO_WAKE,	/* WAKE# from the endpoint */
	MSM_PCIE_GPIO_EP,	/* optional endpoint-specific GPIO */
	MSM_PCIE_MAX_GPIO
};
476
/* Lifecycle state of one root complex's PCIe link. */
enum msm_pcie_link_status {
	MSM_PCIE_LINK_DEINIT,	/* never brought up */
	MSM_PCIE_LINK_ENABLED,	/* link trained and operational */
	MSM_PCIE_LINK_DISABLED	/* link brought down */
};
482
/* GPIO info structure; num is resolved from device tree at probe time. */
struct msm_pcie_gpio_info_t {
	char *name;	/* DT property name */
	uint32_t num;	/* GPIO number (filled in at probe) */
	bool out;	/* true = configured as output */
	uint32_t on;	/* level to drive when enabling — TODO confirm semantics */
	uint32_t init;	/* initial level at setup — TODO confirm vs 'on' */
	bool required;	/* probe fails if this GPIO is missing */
};
492
/* Voltage regulator info structure. */
struct msm_pcie_vreg_info_t {
	struct regulator *hdl;	/* regulator handle, NULL until acquired */
	char *name;		/* supply name used for lookup */
	uint32_t max_v;		/* max voltage, presumably uV (e.g. 1800000) */
	uint32_t min_v;		/* min voltage, presumably uV */
	uint32_t opt_mode;	/* optimum-mode load, presumably uA — TODO confirm */
	bool required;		/* probe fails if this supply is missing */
};
502
/* Reset controller info structure. */
struct msm_pcie_reset_info_t {
	struct reset_control *hdl;	/* reset handle, NULL until acquired */
	char *name;			/* reset line name */
	bool required;			/* probe fails if missing */
};
509
/* Clock info structure. */
struct msm_pcie_clk_info_t {
	struct clk *hdl;	/* clock handle, NULL until acquired */
	char *name;		/* clock consumer name */
	u32 freq;		/* rate in Hz to set; 0 = leave default rate */
	bool config_mem;	/* presumably: apply NORETAIN flags via
				 * msm_pcie_config_clock_mem() — TODO confirm */
	bool required;		/* probe fails if missing */
};
518
/* Memory-mapped resource info structure. */
struct msm_pcie_res_info_t {
	char *name;			/* platform resource name */
	struct resource *resource;	/* resolved platform resource */
	void __iomem *base;		/* ioremapped base address */
};
525
/* IRQ info structure. */
struct msm_pcie_irq_info_t {
	char *name;	/* interrupt name from platform data */
	uint32_t num;	/* Linux IRQ number (filled in at probe) */
};
531
/*
 * One step of a PHY init sequence (see phy_sequence/port_phy_sequence):
 * write @val to PHY register @offset, then wait @delay — units not visible
 * here, presumably microseconds; TODO confirm against the sequence consumer.
 */
struct msm_pcie_phy_info_t {
	u32 offset;
	u32 val;
	u32 delay;
};
538
/* Per-endpoint bookkeeping for devices behind one root complex. */
struct msm_pcie_device_info {
	u32 bdf;			/* bus/devfn key, presumably BDF_OFFSET() encoding */
	struct pci_dev *dev;		/* the enumerated PCI device */
	short short_bdf;		/* compact BDF index — TODO confirm use */
	u32 sid;			/* SMMU stream ID — TODO confirm */
	int domain;			/* PCI domain (RC index) */
	void __iomem *conf_base;	/* mapped config space for this device */
	unsigned long phy_address;	/* physical config-space address */
	u32 dev_ctrlstts_offset;	/* offset of Device Control/Status reg */
	struct msm_pcie_register_event *event_reg;	/* client event registration */
	bool registered;		/* true once the client registered */
};
552
/*
 * Per-root-complex driver state.  One instance per RC lives in the static
 * msm_pcie_dev[] array; rc_idx is the index into that array.
 */
struct msm_pcie_dev_t {
	struct platform_device *pdev;
	struct pci_dev *dev;
	struct regulator *gdsc;		/* core power domain */
	struct regulator *gdsc_smmu;	/* SMMU power domain */
	struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
	struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
	struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK];
	struct msm_pcie_clk_info_t pipeclk[MSM_PCIE_MAX_PIPE_CLK];
	struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
	struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
	struct msm_pcie_irq_info_t msi[MSM_PCIE_MAX_MSI];
	struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
	struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];

	/* ioremapped register spaces (shortcuts into res[].base) */
	void __iomem *parf;
	void __iomem *phy;
	void __iomem *elbi;
	void __iomem *dm_core;
	void __iomem *conf;
	void __iomem *bars;
	void __iomem *tcsr;

	/* AXI address window assigned to this RC */
	uint32_t axi_bar_start;
	uint32_t axi_bar_end;

	struct resource *dev_mem_res;
	struct resource *dev_io_res;

	uint32_t wake_n;		/* WAKE# IRQ number */
	uint32_t vreg_n;		/* number of regulators in use */
	uint32_t gpio_n;		/* number of GPIOs in use */
	uint32_t parf_deemph;		/* PARF de-emphasis override */
	uint32_t parf_swing;		/* PARF swing override */

	/* config-space access gating */
	bool cfg_access;		/* false while access is blocked */
	spinlock_t cfg_lock;
	unsigned long irqsave_flags;
	struct mutex enumerate_lock;
	struct mutex setup_lock;

	/* MSI handling */
	struct irq_domain *irq_domain;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
	uint32_t msi_gicm_addr;		/* GIC MSI target address (0 = use default) */
	uint32_t msi_gicm_base;		/* first GIC MSI interrupt */
	bool use_msi;

	/* link / power state */
	enum msm_pcie_link_status link_status;
	bool user_suspend;		/* suspend requested by client */
	bool disable_pc;		/* power collapse disabled by client */
	struct pci_saved_state *saved_state;

	struct wakeup_source ws;
	struct msm_bus_scale_pdata *bus_scale_table;
	uint32_t bus_client;

	/* link capabilities / tunables, mostly from device tree */
	bool l0s_supported;
	bool l1_supported;
	bool l1ss_supported;
	bool common_clk_en;
	bool clk_power_manage_en;
	bool aux_clk_sync;
	bool aer_enable;
	bool smmu_exist;
	uint32_t smmu_sid_base;
	uint32_t n_fts;
	bool ext_ref_clk;		/* external reference clock in use */
	bool common_phy;		/* PHY shared between RCs */
	uint32_t ep_latency;
	uint32_t wr_halt_size;
	uint32_t cpl_timeout;
	uint32_t current_bdf;
	short current_short_bdf;
	uint32_t perst_delay_us_min;
	uint32_t perst_delay_us_max;
	uint32_t tlp_rd_size;
	bool linkdown_panic;		/* panic on linkdown for debug */
	bool ep_wakeirq;

	uint32_t rc_idx;		/* index into msm_pcie_dev[] */
	uint32_t phy_ver;
	bool drv_ready;
	bool enumerated;
	struct work_struct handle_wake_work;
	struct mutex recovery_lock;
	spinlock_t linkdown_lock;
	spinlock_t wakeup_lock;
	spinlock_t global_irq_lock;
	spinlock_t aer_lock;
	/* event / error statistics */
	ulong linkdown_counter;
	ulong link_turned_on_counter;
	ulong link_turned_off_counter;
	ulong rc_corr_counter;
	ulong rc_non_fatal_counter;
	ulong rc_fatal_counter;
	ulong ep_corr_counter;
	ulong ep_non_fatal_counter;
	ulong ep_fatal_counter;
	bool suspending;
	ulong wake_counter;
	u32 num_active_ep;
	u32 num_ep;
	bool pending_ep_reg;
	/* PHY programming sequences (common + per-port) */
	u32 phy_len;
	u32 port_phy_len;
	struct msm_pcie_phy_info_t *phy_sequence;
	struct msm_pcie_phy_info_t *port_phy_sequence;
	/* shadow copies of config space for restore after link recovery */
	u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
	u32 rc_shadow[PCIE_CONF_SPACE_DW];
	bool shadow_en;
	bool bridge_found;
	struct msm_pcie_register_event *event_reg;
	unsigned int scm_dev_id;
	bool power_on;
	/* IPC logging contexts (see PCIE_DBG/PCIE_ERR macros) */
	void *ipc_log;
	void *ipc_log_long;
	void *ipc_log_dump;
	bool use_19p2mhz_aux_clk;
	bool use_pinctrl;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_sleep;
	struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM];
};
678
679
/* debug mask sys interface (module param: /sys/module/.../debug_mask) */
static int msm_pcie_debug_mask;
module_param_named(debug_mask, msm_pcie_debug_mask,
			int, 0644);

/*
 * Values poked in through debugfs; consumed by the debugfs handlers
 * defined later in this file.
 */
static u32 rc_sel;	/* selects which RC(s) the debugfs op targets */
static u32 base_sel;	/* selects which register base to access */
static u32 wr_offset;	/* register offset for debugfs writes */
static u32 wr_mask;	/* bit mask for debugfs writes */
static u32 wr_value;	/* value for debugfs writes */
static ulong corr_counter_limit = 5;	/* throttle for correctable-error logs
					 * — TODO confirm against AER handler */

/* counter to keep track if common PHY needs to be configured */
static u32 num_rc_on;

/* global lock for PCIe common PHY */
static struct mutex com_phy_lock;

/* Table to track info of PCIe devices */
static struct msm_pcie_device_info
	msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];

/* PCIe driver state shared by all root complexes */
struct pcie_drv_sta {
	u32 rc_num;		/* number of RCs probed — TODO confirm */
	struct mutex drv_lock;	/* serializes global driver state updates */
} pcie_drv;

/* msm pcie device data: one entry per root complex, indexed by rc_idx */
static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
711
/* regulators: {handle, name, max_v, min_v, opt_mode, required} */
static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
	{NULL, "vreg-3.3", 0, 0, 0, false},
	{NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
	{NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
	{NULL, "vreg-cx", 0, 0, 0, false}
};
719
/* GPIOs: {name, num (filled at probe), out, on, init, required} */
static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
	{"perst-gpio", 0, 1, 0, 0, 1},
	{"wake-gpio", 0, 0, 0, 0, 0},
	{"qcom,ep-gpio", 0, 1, 1, 0, 0}
};
726
/* resets: per-RC reset lines; only the last name differs per RC index */
static struct msm_pcie_reset_info_t
msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_0_phy_reset", false}
	},
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_1_phy_reset", false}
	},
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_2_phy_reset", false}
	}
};
749
/* pipe resets: one per RC */
static struct msm_pcie_reset_info_t
msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
	{
		{NULL, "pcie_0_phy_pipe_reset", false}
	},
	{
		{NULL, "pcie_1_phy_pipe_reset", false}
	},
	{
		{NULL, "pcie_2_phy_pipe_reset", false}
	}
};
763
/* clocks: {handle, name, freq (Hz, 0 = default rate), config_mem, required} */
static struct msm_pcie_clk_info_t
	msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
	{
	{NULL, "pcie_0_ref_clk_src", 0, false, false},
	{NULL, "pcie_0_aux_clk", 1010000, false, true},
	{NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_0_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_0_slv_axi_clk", 0, true, true},
	{NULL, "pcie_0_ldo", 0, false, true},
	{NULL, "pcie_0_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	},
	{
	{NULL, "pcie_1_ref_clk_src", 0, false, false},
	{NULL, "pcie_1_aux_clk", 1010000, false, true},
	{NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_1_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_1_slv_axi_clk", 0, true, true},
	{NULL, "pcie_1_ldo", 0, false, true},
	{NULL, "pcie_1_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	},
	{
	{NULL, "pcie_2_ref_clk_src", 0, false, false},
	{NULL, "pcie_2_aux_clk", 1010000, false, true},
	{NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_2_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_2_slv_axi_clk", 0, true, true},
	{NULL, "pcie_2_ldo", 0, false, true},
	{NULL, "pcie_2_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	}
};
801
/* pipe clocks: one 125 MHz pipe clock per RC */
static struct msm_pcie_clk_info_t
	msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
	{
	{NULL, "pcie_0_pipe_clk", 125000000, true, true},
	},
	{
	{NULL, "pcie_1_pipe_clk", 125000000, true, true},
	},
	{
	{NULL, "pcie_2_pipe_clk", 125000000, true, true},
	}
};
815
/* resources: order must match enum msm_pcie_res */
static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
	{"parf", 0, 0},
	{"phy", 0, 0},
	{"dm_core", 0, 0},
	{"elbi", 0, 0},
	{"conf", 0, 0},
	{"io", 0, 0},
	{"bars", 0, 0},
	{"tcsr", 0, 0}
};
827
/* irqs: order must match enum msm_pcie_irq */
static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
	{"int_msi", 0},
	{"int_a", 0},
	{"int_b", 0},
	{"int_c", 0},
	{"int_d", 0},
	{"int_pls_pme", 0},
	{"int_pme_legacy", 0},
	{"int_pls_err", 0},
	{"int_aer_legacy", 0},
	{"int_pls_link_up", 0},
	{"int_pls_link_down", 0},
	{"int_bridge_flush_n", 0},
	{"int_global_int", 0}
};
844
/* MSIs: one named interrupt per MSI vector routed to the GIC */
static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
	{"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
	{"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
	{"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
	{"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
	{"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
	{"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
	{"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
	{"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
};
856
#ifdef CONFIG_ARM
/*
 * On ARM the PCI core hands each bus a struct pci_sys_data; we stash the
 * driver's msm_pcie_dev_t in its private_data so it can be recovered from
 * any bus via PCIE_BUS_PRIV_DATA().
 */
#define PCIE_BUS_PRIV_DATA(bus) \
	(((struct pci_sys_data *)bus->sysdata)->private_data)

static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];

static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	/* one pci_sys_data per RC; PCI domain number == RC index */
	msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
	msm_pcie_sys_data[dev->rc_idx].private_data = dev;

	return &msm_pcie_sys_data[dev->rc_idx];
}

static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
	/* map legacy INTx using standard swizzling and the DT interrupt map */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
}
#else
/* On other architectures bus->sysdata is the msm_pcie_dev_t itself. */
#define PCIE_BUS_PRIV_DATA(bus) \
	(struct msm_pcie_dev_t *)(bus->sysdata)

static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	return dev;
}

static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
}
#endif
888
/*
 * Write a 32-bit register at @base + @offset.  Uses writel_relaxed()
 * followed by a full write barrier so the write reaches the device before
 * any subsequent access.
 */
static inline void msm_pcie_write_reg(void *base, u32 offset, u32 value)
{
	writel_relaxed(value, base + offset);
	/* ensure that changes propagated to the hardware */
	wmb();
}
895
/*
 * Read-modify-write a bit field.  @mask selects the field; @val is the
 * field value (not pre-shifted) — it is shifted up to the position of the
 * lowest set bit of @mask before being OR-ed in.
 *
 * NOTE(review): @mask must be nonzero: find_first_bit() on a zero mask
 * returns 32 and the shift below would be undefined.  A @val wider than
 * the field spills into neighbouring bits; callers pass in-range values.
 */
static inline void msm_pcie_write_reg_field(void *base, u32 offset,
	const u32 mask, u32 val)
{
	u32 shift = find_first_bit((void *)&mask, 32);
	u32 tmp = readl_relaxed(base + offset);

	tmp &= ~mask; /* clear written bits */
	val = tmp | (val << shift);
	writel_relaxed(val, base + offset);
	/* ensure that changes propagated to the hardware */
	wmb();
}
908
909static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
910 struct msm_pcie_clk_info_t *info)
911{
912 int ret;
913
914 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
915 if (ret)
916 PCIE_ERR(dev,
917 "PCIe: RC%d can't configure core memory for clk %s: %d.\n",
918 dev->rc_idx, info->name, ret);
919 else
920 PCIE_DBG2(dev,
921 "PCIe: RC%d configured core memory for clk %s.\n",
922 dev->rc_idx, info->name);
923
924 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
925 if (ret)
926 PCIE_ERR(dev,
927 "PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
928 dev->rc_idx, info->name, ret);
929 else
930 PCIE_DBG2(dev,
931 "PCIe: RC%d configured peripheral memory for clk %s.\n",
932 dev->rc_idx, info->name);
933}
934
935#if defined(CONFIG_ARCH_FSM9010)
936#define PCIE20_PARF_PHY_STTS 0x3c
937#define PCIE2_PHY_RESET_CTRL 0x44
938#define PCIE20_PARF_PHY_REFCLK_CTRL2 0xa0
939#define PCIE20_PARF_PHY_REFCLK_CTRL3 0xa4
940#define PCIE20_PARF_PCS_SWING_CTRL1 0x88
941#define PCIE20_PARF_PCS_SWING_CTRL2 0x8c
942#define PCIE20_PARF_PCS_DEEMPH1 0x74
943#define PCIE20_PARF_PCS_DEEMPH2 0x78
944#define PCIE20_PARF_PCS_DEEMPH3 0x7c
945#define PCIE20_PARF_CONFIGBITS 0x84
946#define PCIE20_PARF_PHY_CTRL3 0x94
947#define PCIE20_PARF_PCS_CTRL 0x80
948
949#define TX_AMP_VAL 127
950#define PHY_RX0_EQ_GEN1_VAL 0
951#define PHY_RX0_EQ_GEN2_VAL 4
952#define TX_DEEMPH_GEN1_VAL 24
953#define TX_DEEMPH_GEN2_3_5DB_VAL 24
954#define TX_DEEMPH_GEN2_6DB_VAL 34
955#define PHY_TX0_TERM_OFFST_VAL 0
956
/* No PHY register dump on FSM9010 (28LP SNS PHY); intentionally empty. */
static inline void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
}
960
/* Assert (@assert != 0) or de-assert (@assert == 0) the PHY SW reset bit. */
static inline void pcie20_phy_reset(struct msm_pcie_dev_t *dev, uint32_t assert)
{
	msm_pcie_write_reg_field(dev->phy, PCIE2_PHY_RESET_CTRL,
		BIT(0), (assert) ? 1 : 0);
}
966
/*
 * One-time programming of the FSM9010 28LP SNS PHY (100 MHz reference).
 * The PHY is held in SW reset while registers are programmed; each field
 * is written only when the current value differs from the target, to avoid
 * redundant register traffic.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d: Initializing 28LP SNS phy - 100MHz\n",
		dev->rc_idx);

	/* Assert Phy SW Reset (held until programming completes) */
	pcie20_phy_reset(dev, 1);

	/* Program SSP ENABLE */
	if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL2) & BIT(0))
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2,
			BIT(0), 0);
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL3) &
		BIT(0)) == 0)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL3,
			BIT(0), 1);
	/* Program Tx Amplitude (7-bit field in each swing control register) */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL1) &
		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_AMP_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL1,
			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_AMP_VAL);
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL2) &
		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_AMP_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL2,
			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_AMP_VAL);
	/* Program De-Emphasis (Gen2 -6dB, Gen2 -3.5dB, Gen1) */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH1) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_DEEMPH_GEN2_6DB_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH1,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN2_6DB_VAL);

	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH2) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_DEEMPH_GEN2_3_5DB_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH2,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN2_3_5DB_VAL);

	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH3) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_DEEMPH_GEN1_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH3,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN1_VAL);

	/* Program Rx_Eq */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_CONFIGBITS) &
		(BIT(2)|BIT(1)|BIT(0))) != PHY_RX0_EQ_GEN1_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_CONFIGBITS,
			BIT(2)|BIT(1)|BIT(0), PHY_RX0_EQ_GEN1_VAL);

	/* Program Tx0_term_offset */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_CTRL3) &
		(BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		PHY_TX0_TERM_OFFST_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_CTRL3,
			BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			PHY_TX0_TERM_OFFST_VAL);

	/* Program REF_CLK source (BIT(1): 1 = external reference clock) */
	msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2, BIT(1),
		(dev->ext_ref_clk) ? 1 : 0);
	/* disable Tx2Rx Loopback */
	if (readl_relaxed(dev->phy + PCIE20_PARF_PCS_CTRL) & BIT(1))
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_CTRL,
			BIT(1), 0);
	/* De-assert Phy SW Reset */
	pcie20_phy_reset(dev, 0);
}
1042
1043static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1044{
1045
1046 /* read PCIE20_PARF_PHY_STTS twice */
1047 readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS);
1048 if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS) & BIT(0))
1049 return false;
1050 else
1051 return true;
1052}
1053#else
/*
 * Select one PHY testbus configuration by programming TEST_CONTROL4-7,
 * then log the values written back from those registers and the four
 * resulting DEBUG_BUS_*_STATUS registers.
 */
static void pcie_phy_dump_test_cntrl(struct msm_pcie_dev_t *dev,
					u32 cntrl4_val, u32 cntrl5_val,
					u32 cntrl6_val, u32 cntrl7_val)
{
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL4(dev->rc_idx, dev->common_phy), cntrl4_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL5(dev->rc_idx, dev->common_phy), cntrl5_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL6(dev->rc_idx, dev->common_phy), cntrl6_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL7(dev->rc_idx, dev->common_phy), cntrl7_val);

	/* Read the selectors back so the log reflects what took effect */
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL4: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL4(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL5: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL5(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL6: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL6(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL7: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL7(dev->rc_idx,
				dev->common_phy)));
	/* Dump the debug bus outputs for the selected configuration */
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_0_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_1_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_2_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_3_STATUS(dev->rc_idx,
				dev->common_phy)));
}
1108
/*
 * Dump PHY state for debugging: walk a series of testbus selections
 * (PCS test controls, per-lane Tx debug bus, MISC debug bus on PHY
 * versions 0x10-0x1F, QSERDES common debug bus, and - when a common
 * PHY is present - the PCIE_COM debug bus), logging the status
 * registers for each selection, and finally hex-dump the entire PHY
 * register space.  Not supported on PHY versions >= 0x20.
 */
static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;
	u32 write_val;

	if (dev->phy_ver >= 0x20) {
		PCIE_DUMP(dev, "PCIe: RC%d PHY dump is not supported\n",
			dev->rc_idx);
		return;
	}

	PCIE_DUMP(dev, "PCIe: RC%d PHY testbus\n", dev->rc_idx);

	/* PCS testbus selections */
	pcie_phy_dump_test_cntrl(dev, 0x18, 0x19, 0x1A, 0x1B);
	pcie_phy_dump_test_cntrl(dev, 0x1C, 0x1D, 0x1E, 0x1F);
	pcie_phy_dump_test_cntrl(dev, 0x20, 0x21, 0x22, 0x23);

	/* Per-lane Tx debug bus: selections 0x1..0x3 */
	for (i = 0; i < 3; i++) {
		write_val = 0x1 + i;
		msm_pcie_write_reg(dev->phy,
			QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
				dev->common_phy), write_val);
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_TX_N_DEBUG_BUS_SEL: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
					dev->common_phy)));

		pcie_phy_dump_test_cntrl(dev, 0x30, 0x31, 0x32, 0x33);
	}

	/* Restore test controls to their default (zero) selection */
	pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);

	/* MISC debug bus is only present on PHY versions 0x10..0x1F */
	if (dev->phy_ver >= 0x10 && dev->phy_ver < 0x20) {
		pcie_phy_dump_test_cntrl(dev, 0x01, 0x02, 0x03, 0x0A);
		pcie_phy_dump_test_cntrl(dev, 0x0E, 0x0F, 0x12, 0x13);
		pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);

		/* Two passes: byte indices 1..4 then 5..8 */
		for (i = 0; i < 8; i += 4) {
			write_val = 0x1 + i;
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(dev->rc_idx,
					dev->common_phy), write_val);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 1);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 2);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 3);

			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_0_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_0_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_1_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_1_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_2_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_2_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_3_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_3_STATUS(
						dev->rc_idx, dev->common_phy)));
		}

		/* Reset MISC debug bus byte indices */
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
				dev->rc_idx, dev->common_phy), 0);
	}

	/* QSERDES common block debug bus: selections 0x2 and 0x3 */
	for (i = 0; i < 2; i++) {
		write_val = 0x2 + i;

		msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL,
			write_val);

		PCIE_DUMP(dev,
			"PCIe: RC%d to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS0: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS1: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS2: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS3: 0x%x\n\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3));
	}

	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0);

	/* Common PHY block debug bus, when a common PHY is present */
	if (dev->common_phy) {
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
			0x01);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE1_INDEX,
			0x02);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE2_INDEX,
			0x03);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE3_INDEX,
			0x04);

		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE1_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE2_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE3_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_0_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_1_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_1_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_2_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_2_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_3_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_3_STATUS));

		/* One extra byte-0 selection (0x05) */
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
			0x05);

		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_0_STATUS));
	}

	/* Raw hex dump of the whole PHY register space, 8 dwords per line */
	size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->phy + i),
			readl_relaxed(dev->phy + (i + 4)),
			readl_relaxed(dev->phy + (i + 8)),
			readl_relaxed(dev->phy + (i + 12)),
			readl_relaxed(dev->phy + (i + 16)),
			readl_relaxed(dev->phy + (i + 20)),
			readl_relaxed(dev->phy + (i + 24)),
			readl_relaxed(dev->phy + (i + 28)));
	}
}
1338
1339#ifdef CONFIG_ARCH_MDMCALIFORNIUM
/*
 * Initialize the MDM 14nm QMP PCIe PHY (19.2MHz reference, common-mode
 * clock, SSC on).  Holds the per-port PCS in software reset while
 * powering it up and programming the QSERDES common, Tx and Rx blocks
 * and the PCS timing registers, then releases the reset and starts the
 * PHY.  Register offsets depend on whether a common PHY block is used.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	u8 common_phy;

	PCIE_DBG(dev,
		"RC%d: Initializing MDM 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
		dev->rc_idx);

	if (dev->common_phy)
		common_phy = 1;
	else
		common_phy = 0;

	/* Assert PCS SW reset and power up the PHY block */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x03);

	/* QSERDES common block: bias, clocking and PLL configuration */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy), 0x06);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);

	/* Pick sysclk enable select based on the TCSR PHY clock scheme */
	if (dev->tcsr) {
		PCIE_DBG(dev, "RC%d: TCSR PHY clock scheme is 0x%x\n",
			dev->rc_idx, readl_relaxed(dev->tcsr));

		if (readl_relaxed(dev->tcsr) & (BIT(1) | BIT(0)))
			msm_pcie_write_reg(dev->phy,
				QSERDES_COM_SYSCLK_EN_SEL, 0x0A);
		else
			msm_pcie_write_reg(dev->phy,
				QSERDES_COM_SYSCLK_EN_SEL, 0x04);
	}

	/* PLL dividers, lock comparators and loop gain */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x0D);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x04);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);

	/* Spread-spectrum clocking (SSC) parameters */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);

	/* Per-lane Tx configuration */
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
		common_phy), 0x45);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_RES_CODE_LANE_OFFSET(dev->rc_idx, common_phy),
		0x02);
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_RCV_DETECT_LVL_2(dev->rc_idx, common_phy),
		0x12);

	/* Per-lane Rx signal-detect and equalization tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
		0x1C);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
		0x14);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
		0xDB);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
		common_phy),
		0x4B);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
		0x04);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);

	/* PCS timing, wakeup delays and de-emphasis */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_OSC_DTCT_ACTIONS(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
		0x73);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
		0x99);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TXDEEMPH_M6DB_V0(dev->rc_idx, common_phy),
		0x15);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
		0x0E);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_SIGDET_CNTRL(dev->rc_idx, common_phy),
		0x07);

	/* Release SW reset and start the PHY/PCS state machines */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
		0x03);
}
1500
/*
 * No separate per-port PCS init on MDM: everything is done in
 * pcie_phy_init().  No-op stub to keep the call sites uniform.
 */
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
}
1504
1505static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1506{
1507 if (readl_relaxed(dev->phy +
1508 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) & BIT(6))
1509 return false;
1510 else
1511 return true;
1512}
1513#else
/*
 * Initialize the 14nm QMP PCIe PHY (19.2MHz reference, common-mode
 * clock, SSC on).  If a PHY init sequence was supplied via device tree
 * (dev->phy_sequence), play it back verbatim and return; otherwise
 * program the built-in QSERDES common-block defaults.  The common PHY
 * block, when present, is powered up first and started at the end.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	int i;
	struct msm_pcie_phy_info_t *phy_seq;

	PCIE_DBG(dev,
		"RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
		dev->rc_idx);

	/* Device-tree-provided sequence overrides the hardcoded defaults */
	if (dev->phy_sequence) {
		i = dev->phy_len;
		phy_seq = dev->phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
		return;
	}

	if (dev->common_phy)
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0x01);

	/* QSERDES common block: clocking, PLL and lock comparators */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1C);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x42);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x04);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
	/* Spread-spectrum clocking (SSC) parameters */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);

	/* PHY version 0x3 uses different HSCLK and rescode divider values */
	if (dev->phy_ver == 0x3) {
		msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
		msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x40);
	}

	/* Release the common block reset and start it */
	if (dev->common_phy) {
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x00);
		msm_pcie_write_reg(dev->phy, PCIE_COM_START_CONTROL, 0x03);
	}
}
1595
/*
 * Initialize the per-port PCS/lane registers of the 14nm QMP PHY.
 * No-op on PHY versions >= 0x20.  If a port init sequence was supplied
 * via device tree (dev->port_phy_sequence), play it back verbatim and
 * return; otherwise program the built-in Tx/Rx/PCS defaults, power the
 * port up, release its SW reset and start it.
 */
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
	int i;
	struct msm_pcie_phy_info_t *phy_seq;
	u8 common_phy;

	if (dev->phy_ver >= 0x20)
		return;

	PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);

	if (dev->common_phy)
		common_phy = 1;
	else
		common_phy = 0;

	/* Device-tree-provided sequence overrides the hardcoded defaults */
	if (dev->port_phy_sequence) {
		i = dev->port_phy_len;
		phy_seq = dev->port_phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
		return;
	}

	/* Per-lane Tx configuration */
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
		common_phy), 0x45);
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy),
		0x06);

	/* Per-lane Rx signal-detect and equalization tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
		0x1C);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
		0x17);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
		0xDB);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_BAND(dev->rc_idx, common_phy),
		0x18);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
		0x04);
	/* PCS idle-detect, wakeup delays and PLL lock check timing */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_RX_IDLE_DTCT_CNTRL(dev->rc_idx, common_phy),
		0x4C);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
		0x05);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
		common_phy), 0x4B);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
		0x14);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
		0x05);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x02);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_STATE_CONFIG4(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_STATE_CONFIG1(dev->rc_idx, common_phy),
		0xA3);

	/* PHY version 0x3 uses different sigdet level and de-emphasis */
	if (dev->phy_ver == 0x3) {
		msm_pcie_write_reg(dev->phy,
			QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
			0x19);

		msm_pcie_write_reg(dev->phy,
			PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
			0x0E);
	}

	/* Full power-up, then wait before releasing the port's SW reset */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x03);
	usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
		0x0A);
}
1712
1713static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1714{
1715 if (dev->phy_ver >= 0x20) {
1716 if (readl_relaxed(dev->phy +
1717 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) &
1718 BIT(6))
1719 return false;
1720 else
1721 return true;
1722 }
1723
1724 if (!(readl_relaxed(dev->phy + PCIE_COM_PCS_READY_STATUS) & 0x1))
1725 return false;
1726 else
1727 return true;
1728}
1729#endif
1730#endif
1731
1732static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
1733{
1734 int ret, scm_ret;
1735
1736 if (!dev) {
1737 pr_err("PCIe: the input pcie dev is NULL.\n");
1738 return -ENODEV;
1739 }
1740
1741 ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
1742 if (ret || scm_ret) {
1743 PCIE_ERR(dev,
1744 "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
1745 dev->rc_idx, ret, scm_ret);
1746 return ret ? ret : -EINVAL;
1747 }
1748
1749 return 0;
1750}
1751
1752static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
1753 u32 offset)
1754{
1755 if (offset % 4) {
1756 PCIE_ERR(dev,
1757 "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
1758 dev->rc_idx, offset);
1759 return MSM_PCIE_ERROR;
1760 }
1761
1762 return 0;
1763}
1764
/*
 * Confirm that the PCIe link of this RC is really up.
 *
 * @check_sw_stts: additionally require the driver's cached link_status
 *		to be MSM_PCIE_LINK_ENABLED.
 * @check_ep: additionally read the endpoint's vendor/device ID register.
 * @ep_conf: mapped config space of the endpoint (used only when
 *		@check_ep is true).
 *
 * Returns true when the hardware link-up bit is set and neither the
 * RC's nor (optionally) the EP's vendor/device ID dword reads back as
 * PCIE_LINK_DOWN (the all-ones pattern seen when the link is dead).
 */
static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
						bool check_sw_stts,
						bool check_ep,
						void __iomem *ep_conf)
{
	u32 val;

	if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
			dev->rc_idx);
		return false;
	}

	/*
	 * BIT(29) of the RC register at dm_core + 0x80 reflects link-up.
	 * NOTE(review): offset/bit appear to follow the DWC core's link
	 * status debug register layout - confirm against the IP spec.
	 */
	if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
			dev->rc_idx);
		return false;
	}

	/* Offset 0 of config space holds the vendor/device ID dword */
	val = readl_relaxed(dev->dm_core);
	PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
		dev->rc_idx, val);
	if (val == PCIE_LINK_DOWN) {
		PCIE_ERR(dev,
			"PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
			dev->rc_idx, dev->rc_idx, val);
		return false;
	}

	if (check_ep) {
		val = readl_relaxed(ep_conf);
		PCIE_DBG(dev,
			"PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
			dev->rc_idx, val);
		if (val == PCIE_LINK_DOWN) {
			PCIE_ERR(dev,
				"PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
				dev->rc_idx, dev->rc_idx, val);
			return false;
		}
	}

	return true;
}
1809
/*
 * Restore PCIe config space from the driver's shadow copy after link
 * recovery.
 *
 * @rc: true restores only the root complex from rc_shadow; false walks
 *	every tracked endpoint in pcidev_table, skipping ones whose link
 *	is not confirmed up, and restores each from its ep_shadow.
 *
 * Only dwords whose shadow value differs from the PCIE_CLEAR sentinel
 * are written back, in reverse dword order.  After restoring an EP, its
 * state is re-saved via pci_save_state() so the PCI core's copy matches.
 */
static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
{
	int i, j;
	u32 val = 0;
	u32 *shadow;
	void *cfg = dev->conf;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		/* EP table is packed; first zero bdf marks the end */
		if (!rc && !dev->pcidev_table[i].bdf)
			break;
		if (rc) {
			cfg = dev->dm_core;
			shadow = dev->rc_shadow;
		} else {
			if (!msm_pcie_confirm_linkup(dev, false, true,
				dev->pcidev_table[i].conf_base))
				continue;

			shadow = dev->ep_shadow[i];
			PCIE_DBG(dev,
				"PCIe Device: %02x:%02x.%01x\n",
				dev->pcidev_table[i].bdf >> 24,
				dev->pcidev_table[i].bdf >> 19 & 0x1f,
				dev->pcidev_table[i].bdf >> 16 & 0x07);
		}
		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
			val = shadow[j];
			if (val != PCIE_CLEAR) {
				PCIE_DBG3(dev,
					"PCIe: before recovery:cfg 0x%x:0x%x\n",
					j * 4, readl_relaxed(cfg + j * 4));
				PCIE_DBG3(dev,
					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
					j, j * 4, val);
				writel_relaxed(val, cfg + j * 4);
				/* ensure changes propagated to the hardware */
				wmb();
				PCIE_DBG3(dev,
					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
					j * 4, readl_relaxed(cfg + j * 4));
			}
		}
		if (rc)
			break;

		pci_save_state(dev->pcidev_table[i].dev);
		/* EP config spaces are laid out 4K apart */
		cfg += SZ_4K;
	}
}
1859
1860static void msm_pcie_write_mask(void __iomem *addr,
1861 uint32_t clear_mask, uint32_t set_mask)
1862{
1863 uint32_t val;
1864
1865 val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
1866 writel_relaxed(val, addr);
1867 wmb(); /* ensure data is written to hardware register */
1868}
1869
/*
 * Dump PARF state for debugging: sweep the PARF testbus selector
 * (bits 23:16 of PARF_SYS_CTRL) through 0x1-0x1A and log PARF_TEST_BUS
 * for each selection, restore the original SYS_CTRL value, then
 * hex-dump the entire PARF register space, 8 dwords per line.
 */
static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;
	u32 original;

	PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);

	/* Save SYS_CTRL so the testbus sweep can be undone afterwards */
	original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
	for (i = 1; i <= 0x1A; i++) {
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
				0xFF0000, i << 16);
		PCIE_DUMP(dev,
			"RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
			readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
	}
	writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);

	PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);

	size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->parf + i),
			readl_relaxed(dev->parf + (i + 4)),
			readl_relaxed(dev->parf + (i + 8)),
			readl_relaxed(dev->parf + (i + 12)),
			readl_relaxed(dev->parf + (i + 16)),
			readl_relaxed(dev->parf + (i + 20)),
			readl_relaxed(dev->parf + (i + 24)),
			readl_relaxed(dev->parf + (i + 28)));
	}
}
1906
1907static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
1908{
1909 PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
1910 dev->rc_idx, dev->enumerated ? "" : "not");
1911 PCIE_DBG_FS(dev, "PCIe: link is %s\n",
1912 (dev->link_status == MSM_PCIE_LINK_ENABLED)
1913 ? "enabled" : "disabled");
1914 PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
1915 dev->cfg_access ? "" : "not");
1916 PCIE_DBG_FS(dev, "use_msi is %d\n",
1917 dev->use_msi);
1918 PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
1919 dev->use_pinctrl);
1920 PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
1921 dev->use_19p2mhz_aux_clk);
1922 PCIE_DBG_FS(dev, "user_suspend is %d\n",
1923 dev->user_suspend);
1924 PCIE_DBG_FS(dev, "num_ep: %d\n",
1925 dev->num_ep);
1926 PCIE_DBG_FS(dev, "num_active_ep: %d\n",
1927 dev->num_active_ep);
1928 PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
1929 dev->pending_ep_reg ? "true" : "false");
1930 PCIE_DBG_FS(dev, "phy_len is %d",
1931 dev->phy_len);
1932 PCIE_DBG_FS(dev, "port_phy_len is %d",
1933 dev->port_phy_len);
1934 PCIE_DBG_FS(dev, "disable_pc is %d",
1935 dev->disable_pc);
1936 PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
1937 dev->l0s_supported ? "" : "not");
1938 PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
1939 dev->l1_supported ? "" : "not");
1940 PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
1941 dev->l1ss_supported ? "" : "not");
1942 PCIE_DBG_FS(dev, "common_clk_en is %d\n",
1943 dev->common_clk_en);
1944 PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
1945 dev->clk_power_manage_en);
1946 PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
1947 dev->aux_clk_sync);
1948 PCIE_DBG_FS(dev, "AER is %s enable\n",
1949 dev->aer_enable ? "" : "not");
1950 PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
1951 dev->ext_ref_clk);
1952 PCIE_DBG_FS(dev, "ep_wakeirq is %d\n",
1953 dev->ep_wakeirq);
1954 PCIE_DBG_FS(dev, "phy_ver is %d\n",
1955 dev->phy_ver);
1956 PCIE_DBG_FS(dev, "drv_ready is %d\n",
1957 dev->drv_ready);
1958 PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
1959 dev->linkdown_panic);
1960 PCIE_DBG_FS(dev, "the link is %s suspending\n",
1961 dev->suspending ? "" : "not");
1962 PCIE_DBG_FS(dev, "shadow is %s enabled\n",
1963 dev->shadow_en ? "" : "not");
1964 PCIE_DBG_FS(dev, "the power of RC is %s on\n",
1965 dev->power_on ? "" : "not");
1966 PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
1967 dev->msi_gicm_addr);
1968 PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
1969 dev->msi_gicm_base);
1970 PCIE_DBG_FS(dev, "bus_client: %d\n",
1971 dev->bus_client);
1972 PCIE_DBG_FS(dev, "current short bdf: %d\n",
1973 dev->current_short_bdf);
1974 PCIE_DBG_FS(dev, "smmu does %s exist\n",
1975 dev->smmu_exist ? "" : "not");
1976 PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
1977 dev->smmu_sid_base);
1978 PCIE_DBG_FS(dev, "n_fts: %d\n",
1979 dev->n_fts);
1980 PCIE_DBG_FS(dev, "common_phy: %d\n",
1981 dev->common_phy);
1982 PCIE_DBG_FS(dev, "ep_latency: %dms\n",
1983 dev->ep_latency);
1984 PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
1985 dev->wr_halt_size);
1986 PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
1987 dev->cpl_timeout);
1988 PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
1989 dev->current_bdf);
1990 PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
1991 dev->perst_delay_us_min);
1992 PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
1993 dev->perst_delay_us_max);
1994 PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
1995 dev->tlp_rd_size);
1996 PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
1997 dev->rc_corr_counter);
1998 PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
1999 dev->rc_non_fatal_counter);
2000 PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
2001 dev->rc_fatal_counter);
2002 PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
2003 dev->ep_corr_counter);
2004 PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
2005 dev->ep_non_fatal_counter);
2006 PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
2007 dev->ep_fatal_counter);
2008 PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
2009 dev->linkdown_counter);
2010 PCIE_DBG_FS(dev, "wake_counter: %lu\n",
2011 dev->wake_counter);
2012 PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
2013 dev->link_turned_on_counter);
2014 PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
2015 dev->link_turned_off_counter);
2016}
2017
2018static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
2019{
2020 int i, j;
2021 u32 val = 0;
2022 u32 *shadow;
2023
2024 for (i = 0; i < MAX_DEVICE_NUM; i++) {
2025 if (!rc && !dev->pcidev_table[i].bdf)
2026 break;
2027 if (rc) {
2028 shadow = dev->rc_shadow;
2029 } else {
2030 shadow = dev->ep_shadow[i];
2031 PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
2032 dev->pcidev_table[i].bdf >> 24,
2033 dev->pcidev_table[i].bdf >> 19 & 0x1f,
2034 dev->pcidev_table[i].bdf >> 16 & 0x07);
2035 }
2036 for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
2037 val = shadow[j];
2038 if (val != PCIE_CLEAR) {
2039 PCIE_DBG_FS(dev,
2040 "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
2041 j, j * 4, val);
2042 }
2043 }
2044 if (rc)
2045 break;
2046 }
2047}
2048
/*
 * msm_pcie_sel_debug_testcase() - execute one debugfs-selected testcase.
 * @dev:      root complex to operate on.
 * @testcase: testcase number, written via the debugfs "case" file or
 *            passed through msm_pcie_debug_info().
 *
 * Testcases 5-10 first walk the endpoint's PCI capability list to find
 * the PCIe capability so the EP's link control/status (+0x10) and device
 * control 2/status 2 (+0x28) registers can be addressed.  Testcases 12
 * and 13 consume the file-scope base_sel/wr_offset/wr_mask/wr_value
 * settings configured through their debugfs files.
 */
static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
					u32 testcase)
{
	int ret, i;
	u32 base_sel_size = 0;
	u32 val = 0;
	u32 current_offset = 0;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	if (testcase >= 5 && testcase <= 10) {
		/*
		 * Walk the standard capability list (8-bit next pointers)
		 * looking for the PCIe capability ID.
		 */
		current_offset =
			readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xff) == PCIE20_CAP_ID) {
				ep_link_ctrlstts_offset = current_offset +
								0x10;
				ep_dev_ctrl2stts2_offset = current_offset +
								0x28;
				break;
			}
			current_offset = (val >> 8) & 0xff;
		}

		/*
		 * NOTE(review): when the capability is absent the offsets
		 * stay 0 but cases 5-8 still issue the EP writes below,
		 * touching dev->conf + 0x10 — confirm callers only use
		 * these testcases with a PCIe-capable endpoint.
		 */
		if (!ep_link_ctrlstts_offset)
			PCIE_DBG(dev,
				"RC%d endpoint does not support PCIe capability registers\n",
				dev->rc_idx);
		else
			PCIE_DBG(dev,
				"RC%d: ep_link_ctrlstts_offset: 0x%x\n",
				dev->rc_idx, ep_link_ctrlstts_offset);
	}

	switch (testcase) {
	case 0: /* output status */
		PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
			dev->rc_idx);
		msm_pcie_show_status(dev);
		break;
	case 1: /* disable link */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
				__func__);
		break;
	case 2: /* enable link and recover config space for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: enable link and recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 3: /*
		 * disable and enable link, recover config space for
		 * RC and EP
		 */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 4: /* dump shadow registers for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping RC shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, true);

		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping EP shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, false);
		break;
	case 5: /* disable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
			dev->rc_idx);
		/* clear ASPM L0s enable on both ends of the link */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(0), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(0), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 6: /* enable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(0));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 7: /* disable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(1), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(1), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 8: /* enable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(1));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 9: /* disable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
			dev->rc_idx);
		/*
		 * Walk the extended capability list (next pointer in the
		 * top 12 bits) for the L1 substates capability.
		 */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
				dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* clear the four L1SS enable bits and LTR (bit 10) */
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_L1SUB_CONTROL1,
					0xf, 0);
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					BIT(10), 0);
		msm_pcie_write_reg_field(dev->conf,
					ep_l1sub_ctrl1_offset,
					0xf, 0);
		msm_pcie_write_mask(dev->conf +
					ep_dev_ctrl2stts2_offset,
					BIT(10), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 10: /* enable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
			dev->rc_idx);
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset =
						current_offset + 0x4;
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		val = readl_relaxed(dev->conf +
				ep_l1sub_cap_reg1_offset);

		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
			val);
		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* enable only the substates the EP advertises support for */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_L1SUB_CONTROL1,
					0xf, val);
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0, BIT(10));
		msm_pcie_write_reg_field(dev->conf,
					ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf +
					ep_dev_ctrl2stts2_offset,
					0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 11: /* enumerate PCIe */
		PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
			dev->rc_idx);
		if (dev->enumerated)
			PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
				dev->rc_idx);
		else {
			if (!msm_pcie_enumerate(dev->rc_idx))
				PCIE_DBG_FS(dev,
					"PCIe: RC%d is successfully enumerated\n",
					dev->rc_idx);
			else
				PCIE_DBG_FS(dev,
					"PCIe: RC%d enumeration failed\n",
					dev->rc_idx);
		}
		break;
	case 12: /* write a value to a register */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: writing a value to a register\n\n",
			dev->rc_idx);

		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		}

		PCIE_DBG_FS(dev,
			"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
			dev->res[base_sel - 1].name,
			dev->res[base_sel - 1].base,
			wr_offset, wr_mask, wr_value);

		msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
			wr_offset, wr_mask, wr_value);

		break;
	case 13: /* dump all registers of base_sel */
		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
			pcie_parf_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
			pcie_phy_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
			base_sel_size = 0x1000;
		} else {
			base_sel_size = resource_size(
				dev->res[base_sel - 1].resource);
		}

		PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
			dev->res[base_sel - 1].name, dev->rc_idx);

		/* 8 dwords (32 bytes) per output line */
		for (i = 0; i < base_sel_size; i += 32) {
			PCIE_DBG_FS(dev,
			"0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			i, readl_relaxed(dev->res[base_sel - 1].base + i),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
		}
		break;
	default:
		PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
		break;
	}
}
2462
2463int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
2464 u32 offset, u32 mask, u32 value)
2465{
2466 int ret = 0;
2467 struct msm_pcie_dev_t *pdev = NULL;
2468
2469 if (!dev) {
2470 pr_err("PCIe: the input pci dev is NULL.\n");
2471 return -ENODEV;
2472 }
2473
2474 if (option == 12 || option == 13) {
2475 if (!base || base > 5) {
2476 PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
2477 PCIE_DBG_FS(pdev,
2478 "PCIe: base_sel is still 0x%x\n", base_sel);
2479 return -EINVAL;
2480 }
2481
2482 base_sel = base;
2483 PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
2484
2485 if (option == 12) {
2486 wr_offset = offset;
2487 wr_mask = mask;
2488 wr_value = value;
2489
2490 PCIE_DBG_FS(pdev,
2491 "PCIe: wr_offset is now 0x%x\n", wr_offset);
2492 PCIE_DBG_FS(pdev,
2493 "PCIe: wr_mask is now 0x%x\n", wr_mask);
2494 PCIE_DBG_FS(pdev,
2495 "PCIe: wr_value is now 0x%x\n", wr_value);
2496 }
2497 }
2498
2499 pdev = PCIE_BUS_PRIV_DATA(dev->bus);
2500 rc_sel = 1 << pdev->rc_idx;
2501
2502 msm_pcie_sel_debug_testcase(pdev, option);
2503
2504 return ret;
2505}
2506EXPORT_SYMBOL(msm_pcie_debug_info);
2507
2508#ifdef CONFIG_DEBUG_FS
/* debugfs directory "pci-msm" and its control-file entries */
static struct dentry *dent_msm_pcie;
static struct dentry *dfile_rc_sel;
static struct dentry *dfile_case;
static struct dentry *dfile_base_sel;
static struct dentry *dfile_linkdown_panic;
static struct dentry *dfile_wr_offset;
static struct dentry *dfile_wr_mask;
static struct dentry *dfile_wr_value;
static struct dentry *dfile_ep_wakeirq;
static struct dentry *dfile_aer_enable;
static struct dentry *dfile_corr_counter_limit;

/* bitmask with one bit per possible RC; upper bound for rc_sel input */
static u32 rc_sel_max;
2522
2523static ssize_t msm_pcie_cmd_debug(struct file *file,
2524 const char __user *buf,
2525 size_t count, loff_t *ppos)
2526{
2527 unsigned long ret;
2528 char str[MAX_MSG_LEN];
2529 unsigned int testcase = 0;
2530 int i;
2531
2532 memset(str, 0, sizeof(str));
2533 ret = copy_from_user(str, buf, sizeof(str));
2534 if (ret)
2535 return -EFAULT;
2536
2537 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2538 testcase = (testcase * 10) + (str[i] - '0');
2539
2540 if (!rc_sel)
2541 rc_sel = 1;
2542
2543 pr_alert("PCIe: TEST: %d\n", testcase);
2544
2545 for (i = 0; i < MAX_RC_NUM; i++) {
2546 if (!((rc_sel >> i) & 0x1))
2547 continue;
2548 msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
2549 }
2550
2551 return count;
2552}
2553
2554const struct file_operations msm_pcie_cmd_debug_ops = {
2555 .write = msm_pcie_cmd_debug,
2556};
2557
2558static ssize_t msm_pcie_set_rc_sel(struct file *file,
2559 const char __user *buf,
2560 size_t count, loff_t *ppos)
2561{
2562 unsigned long ret;
2563 char str[MAX_MSG_LEN];
2564 int i;
2565 u32 new_rc_sel = 0;
2566
2567 memset(str, 0, sizeof(str));
2568 ret = copy_from_user(str, buf, sizeof(str));
2569 if (ret)
2570 return -EFAULT;
2571
2572 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2573 new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
2574
2575 if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
2576 pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
2577 pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
2578 } else {
2579 rc_sel = new_rc_sel;
2580 pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
2581 }
2582
2583 pr_alert("PCIe: the following RC(s) will be tested:\n");
2584 for (i = 0; i < MAX_RC_NUM; i++) {
2585 if (!rc_sel) {
2586 pr_alert("RC %d\n", i);
2587 break;
2588 } else if (rc_sel & (1 << i)) {
2589 pr_alert("RC %d\n", i);
2590 }
2591 }
2592
2593 return count;
2594}
2595
2596const struct file_operations msm_pcie_rc_sel_ops = {
2597 .write = msm_pcie_set_rc_sel,
2598};
2599
2600static ssize_t msm_pcie_set_base_sel(struct file *file,
2601 const char __user *buf,
2602 size_t count, loff_t *ppos)
2603{
2604 unsigned long ret;
2605 char str[MAX_MSG_LEN];
2606 int i;
2607 u32 new_base_sel = 0;
2608 char *base_sel_name;
2609
2610 memset(str, 0, sizeof(str));
2611 ret = copy_from_user(str, buf, sizeof(str));
2612 if (ret)
2613 return -EFAULT;
2614
2615 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2616 new_base_sel = (new_base_sel * 10) + (str[i] - '0');
2617
2618 if (!new_base_sel || new_base_sel > 5) {
2619 pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
2620 new_base_sel);
2621 pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
2622 } else {
2623 base_sel = new_base_sel;
2624 pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
2625 }
2626
2627 switch (base_sel) {
2628 case 1:
2629 base_sel_name = "PARF";
2630 break;
2631 case 2:
2632 base_sel_name = "PHY";
2633 break;
2634 case 3:
2635 base_sel_name = "RC CONFIG SPACE";
2636 break;
2637 case 4:
2638 base_sel_name = "ELBI";
2639 break;
2640 case 5:
2641 base_sel_name = "EP CONFIG SPACE";
2642 break;
2643 default:
2644 base_sel_name = "INVALID";
2645 break;
2646 }
2647
2648 pr_alert("%s\n", base_sel_name);
2649
2650 return count;
2651}
2652
2653const struct file_operations msm_pcie_base_sel_ops = {
2654 .write = msm_pcie_set_base_sel,
2655};
2656
2657static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
2658 const char __user *buf,
2659 size_t count, loff_t *ppos)
2660{
2661 unsigned long ret;
2662 char str[MAX_MSG_LEN];
2663 u32 new_linkdown_panic = 0;
2664 int i;
2665
2666 memset(str, 0, sizeof(str));
2667 ret = copy_from_user(str, buf, sizeof(str));
2668 if (ret)
2669 return -EFAULT;
2670
2671 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2672 new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
2673
2674 if (new_linkdown_panic <= 1) {
2675 for (i = 0; i < MAX_RC_NUM; i++) {
2676 if (!rc_sel) {
2677 msm_pcie_dev[0].linkdown_panic =
2678 new_linkdown_panic;
2679 PCIE_DBG_FS(&msm_pcie_dev[0],
2680 "PCIe: RC0: linkdown_panic is now %d\n",
2681 msm_pcie_dev[0].linkdown_panic);
2682 break;
2683 } else if (rc_sel & (1 << i)) {
2684 msm_pcie_dev[i].linkdown_panic =
2685 new_linkdown_panic;
2686 PCIE_DBG_FS(&msm_pcie_dev[i],
2687 "PCIe: RC%d: linkdown_panic is now %d\n",
2688 i, msm_pcie_dev[i].linkdown_panic);
2689 }
2690 }
2691 } else {
2692 pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
2693 new_linkdown_panic);
2694 }
2695
2696 return count;
2697}
2698
2699const struct file_operations msm_pcie_linkdown_panic_ops = {
2700 .write = msm_pcie_set_linkdown_panic,
2701};
2702
2703static ssize_t msm_pcie_set_wr_offset(struct file *file,
2704 const char __user *buf,
2705 size_t count, loff_t *ppos)
2706{
2707 unsigned long ret;
2708 char str[MAX_MSG_LEN];
2709 int i;
2710
2711 memset(str, 0, sizeof(str));
2712 ret = copy_from_user(str, buf, sizeof(str));
2713 if (ret)
2714 return -EFAULT;
2715
2716 wr_offset = 0;
2717 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2718 wr_offset = (wr_offset * 10) + (str[i] - '0');
2719
2720 pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
2721
2722 return count;
2723}
2724
2725const struct file_operations msm_pcie_wr_offset_ops = {
2726 .write = msm_pcie_set_wr_offset,
2727};
2728
2729static ssize_t msm_pcie_set_wr_mask(struct file *file,
2730 const char __user *buf,
2731 size_t count, loff_t *ppos)
2732{
2733 unsigned long ret;
2734 char str[MAX_MSG_LEN];
2735 int i;
2736
2737 memset(str, 0, sizeof(str));
2738 ret = copy_from_user(str, buf, sizeof(str));
2739 if (ret)
2740 return -EFAULT;
2741
2742 wr_mask = 0;
2743 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2744 wr_mask = (wr_mask * 10) + (str[i] - '0');
2745
2746 pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
2747
2748 return count;
2749}
2750
2751const struct file_operations msm_pcie_wr_mask_ops = {
2752 .write = msm_pcie_set_wr_mask,
2753};
2754static ssize_t msm_pcie_set_wr_value(struct file *file,
2755 const char __user *buf,
2756 size_t count, loff_t *ppos)
2757{
2758 unsigned long ret;
2759 char str[MAX_MSG_LEN];
2760 int i;
2761
2762 memset(str, 0, sizeof(str));
2763 ret = copy_from_user(str, buf, sizeof(str));
2764 if (ret)
2765 return -EFAULT;
2766
2767 wr_value = 0;
2768 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2769 wr_value = (wr_value * 10) + (str[i] - '0');
2770
2771 pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
2772
2773 return count;
2774}
2775
2776const struct file_operations msm_pcie_wr_value_ops = {
2777 .write = msm_pcie_set_wr_value,
2778};
2779
2780static ssize_t msm_pcie_set_ep_wakeirq(struct file *file,
2781 const char __user *buf,
2782 size_t count, loff_t *ppos)
2783{
2784 unsigned long ret;
2785 char str[MAX_MSG_LEN];
2786 u32 new_ep_wakeirq = 0;
2787 int i;
2788
2789 memset(str, 0, sizeof(str));
2790 ret = copy_from_user(str, buf, sizeof(str));
2791 if (ret)
2792 return -EFAULT;
2793
2794 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2795 new_ep_wakeirq = (new_ep_wakeirq * 10) + (str[i] - '0');
2796
2797 if (new_ep_wakeirq <= 1) {
2798 for (i = 0; i < MAX_RC_NUM; i++) {
2799 if (!rc_sel) {
2800 msm_pcie_dev[0].ep_wakeirq = new_ep_wakeirq;
2801 PCIE_DBG_FS(&msm_pcie_dev[0],
2802 "PCIe: RC0: ep_wakeirq is now %d\n",
2803 msm_pcie_dev[0].ep_wakeirq);
2804 break;
2805 } else if (rc_sel & (1 << i)) {
2806 msm_pcie_dev[i].ep_wakeirq = new_ep_wakeirq;
2807 PCIE_DBG_FS(&msm_pcie_dev[i],
2808 "PCIe: RC%d: ep_wakeirq is now %d\n",
2809 i, msm_pcie_dev[i].ep_wakeirq);
2810 }
2811 }
2812 } else {
2813 pr_err("PCIe: Invalid input for ep_wakeirq: %d. Please enter 0 or 1.\n",
2814 new_ep_wakeirq);
2815 }
2816
2817 return count;
2818}
2819
2820const struct file_operations msm_pcie_ep_wakeirq_ops = {
2821 .write = msm_pcie_set_ep_wakeirq,
2822};
2823
2824static ssize_t msm_pcie_set_aer_enable(struct file *file,
2825 const char __user *buf,
2826 size_t count, loff_t *ppos)
2827{
2828 unsigned long ret;
2829 char str[MAX_MSG_LEN];
2830 u32 new_aer_enable = 0;
2831 u32 temp_rc_sel;
2832 int i;
2833
2834 memset(str, 0, sizeof(str));
2835 ret = copy_from_user(str, buf, sizeof(str));
2836 if (ret)
2837 return -EFAULT;
2838
2839 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2840 new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
2841
2842 if (new_aer_enable > 1) {
2843 pr_err(
2844 "PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
2845 new_aer_enable);
2846 return count;
2847 }
2848
2849 if (rc_sel)
2850 temp_rc_sel = rc_sel;
2851 else
2852 temp_rc_sel = 0x1;
2853
2854 for (i = 0; i < MAX_RC_NUM; i++) {
2855 if (temp_rc_sel & (1 << i)) {
2856 msm_pcie_dev[i].aer_enable = new_aer_enable;
2857 PCIE_DBG_FS(&msm_pcie_dev[i],
2858 "PCIe: RC%d: aer_enable is now %d\n",
2859 i, msm_pcie_dev[i].aer_enable);
2860
2861 msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
2862 PCIE20_BRIDGE_CTRL,
2863 new_aer_enable ? 0 : BIT(16),
2864 new_aer_enable ? BIT(16) : 0);
2865
2866 PCIE_DBG_FS(&msm_pcie_dev[i],
2867 "RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
2868 readl_relaxed(msm_pcie_dev[i].dm_core +
2869 PCIE20_BRIDGE_CTRL));
2870 }
2871 }
2872
2873 return count;
2874}
2875
2876const struct file_operations msm_pcie_aer_enable_ops = {
2877 .write = msm_pcie_set_aer_enable,
2878};
2879
2880static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
2881 const char __user *buf,
2882 size_t count, loff_t *ppos)
2883{
2884 unsigned long ret;
2885 char str[MAX_MSG_LEN];
2886 int i;
2887
2888 memset(str, 0, sizeof(str));
2889 ret = copy_from_user(str, buf, sizeof(str));
2890 if (ret)
2891 return -EFAULT;
2892
2893 corr_counter_limit = 0;
2894 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2895 corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
2896
2897 pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
2898
2899 return count;
2900}
2901
2902const struct file_operations msm_pcie_corr_counter_limit_ops = {
2903 .write = msm_pcie_set_corr_counter_limit,
2904};
2905
2906static void msm_pcie_debugfs_init(void)
2907{
2908 rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
2909 wr_mask = 0xffffffff;
2910
2911 dent_msm_pcie = debugfs_create_dir("pci-msm", 0);
2912 if (IS_ERR(dent_msm_pcie)) {
2913 pr_err("PCIe: fail to create the folder for debug_fs.\n");
2914 return;
2915 }
2916
2917 dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
2918 dent_msm_pcie, 0,
2919 &msm_pcie_rc_sel_ops);
2920 if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
2921 pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
2922 goto rc_sel_error;
2923 }
2924
2925 dfile_case = debugfs_create_file("case", 0664,
2926 dent_msm_pcie, 0,
2927 &msm_pcie_cmd_debug_ops);
2928 if (!dfile_case || IS_ERR(dfile_case)) {
2929 pr_err("PCIe: fail to create the file for debug_fs case.\n");
2930 goto case_error;
2931 }
2932
2933 dfile_base_sel = debugfs_create_file("base_sel", 0664,
2934 dent_msm_pcie, 0,
2935 &msm_pcie_base_sel_ops);
2936 if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
2937 pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
2938 goto base_sel_error;
2939 }
2940
2941 dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
2942 dent_msm_pcie, 0,
2943 &msm_pcie_linkdown_panic_ops);
2944 if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
2945 pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
2946 goto linkdown_panic_error;
2947 }
2948
2949 dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
2950 dent_msm_pcie, 0,
2951 &msm_pcie_wr_offset_ops);
2952 if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
2953 pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
2954 goto wr_offset_error;
2955 }
2956
2957 dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
2958 dent_msm_pcie, 0,
2959 &msm_pcie_wr_mask_ops);
2960 if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
2961 pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
2962 goto wr_mask_error;
2963 }
2964
2965 dfile_wr_value = debugfs_create_file("wr_value", 0664,
2966 dent_msm_pcie, 0,
2967 &msm_pcie_wr_value_ops);
2968 if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
2969 pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
2970 goto wr_value_error;
2971 }
2972
2973 dfile_ep_wakeirq = debugfs_create_file("ep_wakeirq", 0664,
2974 dent_msm_pcie, 0,
2975 &msm_pcie_ep_wakeirq_ops);
2976 if (!dfile_ep_wakeirq || IS_ERR(dfile_ep_wakeirq)) {
2977 pr_err("PCIe: fail to create the file for debug_fs ep_wakeirq.\n");
2978 goto ep_wakeirq_error;
2979 }
2980
2981 dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
2982 dent_msm_pcie, 0,
2983 &msm_pcie_aer_enable_ops);
2984 if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
2985 pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
2986 goto aer_enable_error;
2987 }
2988
2989 dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
2990 0664, dent_msm_pcie, 0,
2991 &msm_pcie_corr_counter_limit_ops);
2992 if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
2993 pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
2994 goto corr_counter_limit_error;
2995 }
2996 return;
2997
2998corr_counter_limit_error:
2999 debugfs_remove(dfile_aer_enable);
3000aer_enable_error:
3001 debugfs_remove(dfile_ep_wakeirq);
3002ep_wakeirq_error:
3003 debugfs_remove(dfile_wr_value);
3004wr_value_error:
3005 debugfs_remove(dfile_wr_mask);
3006wr_mask_error:
3007 debugfs_remove(dfile_wr_offset);
3008wr_offset_error:
3009 debugfs_remove(dfile_linkdown_panic);
3010linkdown_panic_error:
3011 debugfs_remove(dfile_base_sel);
3012base_sel_error:
3013 debugfs_remove(dfile_case);
3014case_error:
3015 debugfs_remove(dfile_rc_sel);
3016rc_sel_error:
3017 debugfs_remove(dent_msm_pcie);
3018}
3019
3020static void msm_pcie_debugfs_exit(void)
3021{
3022 debugfs_remove(dfile_rc_sel);
3023 debugfs_remove(dfile_case);
3024 debugfs_remove(dfile_base_sel);
3025 debugfs_remove(dfile_linkdown_panic);
3026 debugfs_remove(dfile_wr_offset);
3027 debugfs_remove(dfile_wr_mask);
3028 debugfs_remove(dfile_wr_value);
3029 debugfs_remove(dfile_ep_wakeirq);
3030 debugfs_remove(dfile_aer_enable);
3031 debugfs_remove(dfile_corr_counter_limit);
3032}
3033#else
/* No-op stub used when debugfs support is compiled out. */
static void msm_pcie_debugfs_init(void)
{
}
3037
/* No-op stub used when debugfs support is compiled out. */
static void msm_pcie_debugfs_exit(void)
{
}
3041#endif
3042
3043static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
3044{
3045 return readl_relaxed(dev->dm_core +
3046 PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
3047}
3048
3049/**
3050 * msm_pcie_iatu_config - configure outbound address translation region
3051 * @dev: root commpex
3052 * @nr: region number
3053 * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
3054 * @host_addr: - region start address on host
3055 * @host_end: - region end address (low 32 bit) on host,
3056 * upper 32 bits are same as for @host_addr
3057 * @target_addr: - region start address on target
3058 */
3059static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
3060 unsigned long host_addr, u32 host_end,
3061 unsigned long target_addr)
3062{
3063 void __iomem *pcie20 = dev->dm_core;
3064
3065 if (dev->shadow_en) {
3066 dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
3067 nr;
3068 dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
3069 type;
3070 dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
3071 lower_32_bits(host_addr);
3072 dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
3073 upper_32_bits(host_addr);
3074 dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
3075 host_end;
3076 dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
3077 lower_32_bits(target_addr);
3078 dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
3079 upper_32_bits(target_addr);
3080 dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
3081 BIT(31);
3082 }
3083
3084 /* select region */
3085 writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
3086 /* ensure that hardware locks it */
3087 wmb();
3088
3089 /* switch off region before changing it */
3090 writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
3091 /* and wait till it propagates to the hardware */
3092 wmb();
3093
3094 writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
3095 writel_relaxed(lower_32_bits(host_addr),
3096 pcie20 + PCIE20_PLR_IATU_LBAR);
3097 writel_relaxed(upper_32_bits(host_addr),
3098 pcie20 + PCIE20_PLR_IATU_UBAR);
3099 writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
3100 writel_relaxed(lower_32_bits(target_addr),
3101 pcie20 + PCIE20_PLR_IATU_LTAR);
3102 writel_relaxed(upper_32_bits(target_addr),
3103 pcie20 + PCIE20_PLR_IATU_UTAR);
3104 /* ensure that changes propagated to the hardware */
3105 wmb();
3106 writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);
3107
3108 /* ensure that changes propagated to the hardware */
3109 wmb();
3110
3111 if (dev->enumerated) {
3112 PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
3113 dev->pcidev_table[nr].bdf >> 24,
3114 dev->pcidev_table[nr].bdf >> 19 & 0x1f,
3115 dev->pcidev_table[nr].bdf >> 16 & 0x07);
3116 PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
3117 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
3118 PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
3119 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
3120 PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
3121 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
3122 PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
3123 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
3124 PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
3125 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
3126 PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
3127 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
3128 PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
3129 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
3130 PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
3131 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
3132 }
3133}
3134
3135/**
3136 * msm_pcie_cfg_bdf - configure for config access
3137 * @dev: root commpex
3138 * @bus: PCI bus number
3139 * @devfn: PCI dev and function number
3140 *
3141 * Remap if required region 0 for config access of proper type
3142 * (CFG0 for bus 1, CFG1 for other buses)
3143 * Cache current device bdf for speed-up
3144 */
3145static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
3146{
3147 struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
3148 u32 bdf = BDF_OFFSET(bus, devfn);
3149 u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3150
3151 if (dev->current_bdf == bdf)
3152 return;
3153
3154 msm_pcie_iatu_config(dev, 0, type,
3155 axi_conf->start,
3156 axi_conf->start + SZ_4K - 1,
3157 bdf);
3158
3159 dev->current_bdf = bdf;
3160}
3161
3162static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
3163 u32 word_offset, u32 wr_val,
3164 u32 bdf, bool rc)
3165{
3166 int i, j;
3167 u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;
3168
3169 if (rc) {
3170 dev->rc_shadow[word_offset / 4] = wr_val;
3171 } else {
3172 for (i = 0; i < MAX_DEVICE_NUM; i++) {
3173 if (!dev->pcidev_table[i].bdf) {
3174 for (j = 0; j < max_dev; j++)
3175 if (!msm_pcie_dev_tbl[j].bdf) {
3176 msm_pcie_dev_tbl[j].bdf = bdf;
3177 break;
3178 }
3179 dev->pcidev_table[i].bdf = bdf;
3180 if ((!dev->bridge_found) && (i > 0))
3181 dev->bridge_found = true;
3182 }
3183 if (dev->pcidev_table[i].bdf == bdf) {
3184 dev->ep_shadow[i][word_offset / 4] = wr_val;
3185 break;
3186 }
3187 }
3188 }
3189}
3190
3191static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
3192 int where, int size, u32 *val)
3193{
3194 uint32_t word_offset, byte_offset, mask;
3195 uint32_t rd_val, wr_val;
3196 struct msm_pcie_dev_t *dev;
3197 void __iomem *config_base;
3198 bool rc = false;
3199 u32 rc_idx;
3200 int rv = 0;
3201 u32 bdf = BDF_OFFSET(bus->number, devfn);
3202 int i;
3203
3204 dev = PCIE_BUS_PRIV_DATA(bus);
3205
3206 if (!dev) {
3207 pr_err("PCIe: No device found for this bus.\n");
3208 *val = ~0;
3209 rv = PCIBIOS_DEVICE_NOT_FOUND;
3210 goto out;
3211 }
3212
3213 rc_idx = dev->rc_idx;
3214 rc = (bus->number == 0);
3215
3216 spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);
3217
3218 if (!dev->cfg_access) {
3219 PCIE_DBG3(dev,
3220 "Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
3221 rc_idx, bus->number, devfn, where, size);
3222 *val = ~0;
3223 rv = PCIBIOS_DEVICE_NOT_FOUND;
3224 goto unlock;
3225 }
3226
3227 if (rc && (devfn != 0)) {
3228 PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
3229 (oper == RD) ? "rd" : "wr", bus->number, devfn);
3230 *val = ~0;
3231 rv = PCIBIOS_DEVICE_NOT_FOUND;
3232 goto unlock;
3233 }
3234
3235 if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
3236 PCIE_DBG3(dev,
3237 "Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
3238 rc_idx, bus->number, devfn, where, size);
3239 *val = ~0;
3240 rv = PCIBIOS_DEVICE_NOT_FOUND;
3241 goto unlock;
3242 }
3243
3244 /* check if the link is up for endpoint */
3245 if (!rc && !msm_pcie_is_link_up(dev)) {
3246 PCIE_ERR(dev,
3247 "PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
3248 rc_idx, (oper == RD) ? "rd" : "wr",
3249 bus->number, devfn);
3250 *val = ~0;
3251 rv = PCIBIOS_DEVICE_NOT_FOUND;
3252 goto unlock;
3253 }
3254
3255 if (!rc && !dev->enumerated)
3256 msm_pcie_cfg_bdf(dev, bus->number, devfn);
3257
3258 word_offset = where & ~0x3;
3259 byte_offset = where & 0x3;
3260 mask = (~0 >> (8 * (4 - size))) << (8 * byte_offset);
3261
3262 if (rc || !dev->enumerated) {
3263 config_base = rc ? dev->dm_core : dev->conf;
3264 } else {
3265 for (i = 0; i < MAX_DEVICE_NUM; i++) {
3266 if (dev->pcidev_table[i].bdf == bdf) {
3267 config_base = dev->pcidev_table[i].conf_base;
3268 break;
3269 }
3270 }
3271 if (i == MAX_DEVICE_NUM) {
3272 *val = ~0;
3273 rv = PCIBIOS_DEVICE_NOT_FOUND;
3274 goto unlock;
3275 }
3276 }
3277
3278 rd_val = readl_relaxed(config_base + word_offset);
3279
3280 if (oper == RD) {
3281 *val = ((rd_val & mask) >> (8 * byte_offset));
3282 PCIE_DBG3(dev,
3283 "RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
3284 rc_idx, bus->number, devfn, where, size, *val, rd_val);
3285 } else {
3286 wr_val = (rd_val & ~mask) |
3287 ((*val << (8 * byte_offset)) & mask);
3288
3289 if ((bus->number == 0) && (where == 0x3c))
3290 wr_val = wr_val | (3 << 16);
3291
3292 writel_relaxed(wr_val, config_base + word_offset);
3293 wmb(); /* ensure config data is written to hardware register */
3294
3295 if (rd_val == PCIE_LINK_DOWN)
3296 PCIE_ERR(dev,
3297 "Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
3298 rc_idx, bus->number, devfn, where, size);
3299 else if (dev->shadow_en)
3300 msm_pcie_save_shadow(dev, word_offset, wr_val, bdf, rc);
3301
3302 PCIE_DBG3(dev,
3303 "RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
3304 rc_idx, bus->number, devfn, where, size,
3305 wr_val, rd_val, *val);
3306 }
3307
3308unlock:
3309 spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
3310out:
3311 return rv;
3312}
3313
3314static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
3315 int size, u32 *val)
3316{
3317 int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
3318
3319 if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
3320 *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
3321 PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
3322 }
3323
3324 return ret;
3325}
3326
/* pci_ops write hook: forwards the write to msm_pcie_oper_conf(). */
static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			    int where, int size, u32 val)
{
	return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
}
3332
/* Config-space accessors handed to the PCI core for this controller. */
static struct pci_ops msm_pcie_ops = {
	.read = msm_pcie_rd_conf,
	.write = msm_pcie_wr_conf,
};
3337
3338static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
3339{
3340 int rc = 0, i;
3341 struct msm_pcie_gpio_info_t *info;
3342
3343 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3344
3345 for (i = 0; i < dev->gpio_n; i++) {
3346 info = &dev->gpio[i];
3347
3348 if (!info->num)
3349 continue;
3350
3351 rc = gpio_request(info->num, info->name);
3352 if (rc) {
3353 PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
3354 dev->rc_idx, info->name, rc);
3355 break;
3356 }
3357
3358 if (info->out)
3359 rc = gpio_direction_output(info->num, info->init);
3360 else
3361 rc = gpio_direction_input(info->num);
3362 if (rc) {
3363 PCIE_ERR(dev,
3364 "PCIe: RC%d can't set direction for GPIO %s:%d\n",
3365 dev->rc_idx, info->name, rc);
3366 gpio_free(info->num);
3367 break;
3368 }
3369 }
3370
3371 if (rc)
3372 while (i--)
3373 gpio_free(dev->gpio[i].num);
3374
3375 return rc;
3376}
3377
3378static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
3379{
3380 int i;
3381
3382 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3383
3384 for (i = 0; i < dev->gpio_n; i++)
3385 gpio_free(dev->gpio[i].num);
3386}
3387
3388int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
3389{
3390 int i, rc = 0;
3391 struct regulator *vreg;
3392 struct msm_pcie_vreg_info_t *info;
3393
3394 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3395
3396 for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
3397 info = &dev->vreg[i];
3398 vreg = info->hdl;
3399
3400 if (!vreg)
3401 continue;
3402
3403 PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
3404 dev->rc_idx, info->name);
3405 if (info->max_v) {
3406 rc = regulator_set_voltage(vreg,
3407 info->min_v, info->max_v);
3408 if (rc) {
3409 PCIE_ERR(dev,
3410 "PCIe: RC%d can't set voltage for %s: %d\n",
3411 dev->rc_idx, info->name, rc);
3412 break;
3413 }
3414 }
3415
3416 if (info->opt_mode) {
3417 rc = regulator_set_load(vreg, info->opt_mode);
3418 if (rc < 0) {
3419 PCIE_ERR(dev,
3420 "PCIe: RC%d can't set mode for %s: %d\n",
3421 dev->rc_idx, info->name, rc);
3422 break;
3423 }
3424 }
3425
3426 rc = regulator_enable(vreg);
3427 if (rc) {
3428 PCIE_ERR(dev,
3429 "PCIe: RC%d can't enable regulator %s: %d\n",
3430 dev->rc_idx, info->name, rc);
3431 break;
3432 }
3433 }
3434
3435 if (rc)
3436 while (i--) {
3437 struct regulator *hdl = dev->vreg[i].hdl;
3438
3439 if (hdl) {
3440 regulator_disable(hdl);
3441 if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
3442 PCIE_DBG(dev,
3443 "RC%d: Removing %s vote.\n",
3444 dev->rc_idx,
3445 dev->vreg[i].name);
3446 regulator_set_voltage(hdl,
3447 RPM_REGULATOR_CORNER_NONE,
3448 INT_MAX);
3449 }
3450 }
3451
3452 }
3453
3454 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3455
3456 return rc;
3457}
3458
3459static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
3460{
3461 int i;
3462
3463 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3464
3465 for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
3466 if (dev->vreg[i].hdl) {
3467 PCIE_DBG(dev, "Vreg %s is being disabled\n",
3468 dev->vreg[i].name);
3469 regulator_disable(dev->vreg[i].hdl);
3470
3471 if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
3472 PCIE_DBG(dev,
3473 "RC%d: Removing %s vote.\n",
3474 dev->rc_idx,
3475 dev->vreg[i].name);
3476 regulator_set_voltage(dev->vreg[i].hdl,
3477 RPM_REGULATOR_CORNER_NONE,
3478 INT_MAX);
3479 }
3480 }
3481 }
3482
3483 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3484}
3485
3486static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
3487{
3488 int i, rc = 0;
3489 struct msm_pcie_clk_info_t *info;
3490 struct msm_pcie_reset_info_t *reset_info;
3491
3492 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3493
3494 rc = regulator_enable(dev->gdsc);
3495
3496 if (rc) {
3497 PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
3498 dev->rc_idx, dev->pdev->name);
3499 return rc;
3500 }
3501
3502 if (dev->gdsc_smmu) {
3503 rc = regulator_enable(dev->gdsc_smmu);
3504
3505 if (rc) {
3506 PCIE_ERR(dev,
3507 "PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
3508 dev->rc_idx, dev->pdev->name);
3509 return rc;
3510 }
3511 }
3512
3513 PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
3514 if (dev->bus_client) {
3515 rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
3516 if (rc) {
3517 PCIE_ERR(dev,
3518 "PCIe: fail to set bus bandwidth for RC%d:%d.\n",
3519 dev->rc_idx, rc);
3520 return rc;
3521 }
3522
3523 PCIE_DBG2(dev,
3524 "PCIe: set bus bandwidth for RC%d.\n",
3525 dev->rc_idx);
3526 }
3527
3528 for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
3529 info = &dev->clk[i];
3530
3531 if (!info->hdl)
3532 continue;
3533
3534 if (info->config_mem)
3535 msm_pcie_config_clock_mem(dev, info);
3536
3537 if (info->freq) {
3538 rc = clk_set_rate(info->hdl, info->freq);
3539 if (rc) {
3540 PCIE_ERR(dev,
3541 "PCIe: RC%d can't set rate for clk %s: %d.\n",
3542 dev->rc_idx, info->name, rc);
3543 break;
3544 }
3545
3546 PCIE_DBG2(dev,
3547 "PCIe: RC%d set rate for clk %s.\n",
3548 dev->rc_idx, info->name);
3549 }
3550
3551 rc = clk_prepare_enable(info->hdl);
3552
3553 if (rc)
3554 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
3555 dev->rc_idx, info->name);
3556 else
3557 PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
3558 info->name, dev->rc_idx);
3559 }
3560
3561 if (rc) {
3562 PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
3563 dev->rc_idx);
3564 while (i--) {
3565 struct clk *hdl = dev->clk[i].hdl;
3566
3567 if (hdl)
3568 clk_disable_unprepare(hdl);
3569 }
3570
3571 if (dev->gdsc_smmu)
3572 regulator_disable(dev->gdsc_smmu);
3573
3574 regulator_disable(dev->gdsc);
3575 }
3576
3577 for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
3578 reset_info = &dev->reset[i];
3579 if (reset_info->hdl) {
3580 rc = reset_control_deassert(reset_info->hdl);
3581 if (rc)
3582 PCIE_ERR(dev,
3583 "PCIe: RC%d failed to deassert reset for %s.\n",
3584 dev->rc_idx, reset_info->name);
3585 else
3586 PCIE_DBG2(dev,
3587 "PCIe: RC%d successfully deasserted reset for %s.\n",
3588 dev->rc_idx, reset_info->name);
3589 }
3590 }
3591
3592 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3593
3594 return rc;
3595}
3596
3597static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
3598{
3599 int i;
3600 int rc;
3601
3602 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3603
3604 for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
3605 if (dev->clk[i].hdl)
3606 clk_disable_unprepare(dev->clk[i].hdl);
3607
3608 if (dev->bus_client) {
3609 PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
3610 dev->rc_idx);
3611
3612 rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
3613 if (rc)
3614 PCIE_ERR(dev,
3615 "PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
3616 dev->rc_idx, rc);
3617 else
3618 PCIE_DBG(dev,
3619 "PCIe: relinquish bus bandwidth for RC%d.\n",
3620 dev->rc_idx);
3621 }
3622
3623 if (dev->gdsc_smmu)
3624 regulator_disable(dev->gdsc_smmu);
3625
3626 regulator_disable(dev->gdsc);
3627
3628 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3629}
3630
3631static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
3632{
3633 int i, rc = 0;
3634 struct msm_pcie_clk_info_t *info;
3635 struct msm_pcie_reset_info_t *pipe_reset_info;
3636
3637 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3638
3639 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
3640 info = &dev->pipeclk[i];
3641
3642 if (!info->hdl)
3643 continue;
3644
3645
3646 if (info->config_mem)
3647 msm_pcie_config_clock_mem(dev, info);
3648
3649 if (info->freq) {
3650 rc = clk_set_rate(info->hdl, info->freq);
3651 if (rc) {
3652 PCIE_ERR(dev,
3653 "PCIe: RC%d can't set rate for clk %s: %d.\n",
3654 dev->rc_idx, info->name, rc);
3655 break;
3656 }
3657
3658 PCIE_DBG2(dev,
3659 "PCIe: RC%d set rate for clk %s: %d.\n",
3660 dev->rc_idx, info->name, rc);
3661 }
3662
3663 rc = clk_prepare_enable(info->hdl);
3664
3665 if (rc)
3666 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
3667 dev->rc_idx, info->name);
3668 else
3669 PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
3670 dev->rc_idx, info->name);
3671 }
3672
3673 if (rc) {
3674 PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
3675 dev->rc_idx);
3676 while (i--)
3677 if (dev->pipeclk[i].hdl)
3678 clk_disable_unprepare(dev->pipeclk[i].hdl);
3679 }
3680
3681 for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
3682 pipe_reset_info = &dev->pipe_reset[i];
3683 if (pipe_reset_info->hdl) {
3684 rc = reset_control_deassert(
3685 pipe_reset_info->hdl);
3686 if (rc)
3687 PCIE_ERR(dev,
3688 "PCIe: RC%d failed to deassert pipe reset for %s.\n",
3689 dev->rc_idx, pipe_reset_info->name);
3690 else
3691 PCIE_DBG2(dev,
3692 "PCIe: RC%d successfully deasserted pipe reset for %s.\n",
3693 dev->rc_idx, pipe_reset_info->name);
3694 }
3695 }
3696
3697 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3698
3699 return rc;
3700}
3701
3702static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
3703{
3704 int i;
3705
3706 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3707
3708 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
3709 if (dev->pipeclk[i].hdl)
3710 clk_disable_unprepare(
3711 dev->pipeclk[i].hdl);
3712
3713 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3714}
3715
3716static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
3717{
3718 int i;
3719 u8 type;
3720 struct msm_pcie_device_info *dev_table = dev->pcidev_table;
3721
3722 for (i = 0; i < MAX_DEVICE_NUM; i++) {
3723 if (!dev_table[i].bdf)
3724 break;
3725
3726 type = dev_table[i].bdf >> 24 == 0x1 ?
3727 PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3728
3729 msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
3730 dev_table[i].phy_address + SZ_4K - 1,
3731 dev_table[i].bdf);
3732 }
3733}
3734
/*
 * msm_pcie_config_controller - program core registers after link-up
 * @dev: root complex
 *
 * Re-creates the outbound iATU mapping(s), programs N_FTS, the AUX
 * clock frequency, the completion timeout, AER enables on the RC and
 * the PARF SMMU BDF-translation registers.  N_FTS is mirrored into the
 * RC shadow when shadowing is enabled.
 */
static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
{
	int i;

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
	 * axi config address range to device config address range
	 */
	if (dev->enumerated) {
		msm_pcie_iatu_config_all_ep(dev);
	} else {
		dev->current_bdf = 0; /* to force IATU re-config */
		msm_pcie_cfg_bdf(dev, 1, 0);
	}

	/* configure N_FTS */
	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
	/* n_fts == 0: set BIT(15); otherwise program the N_FTS field */
	if (!dev->n_fts)
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
			0, BIT(15));
	else
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
			PCIE20_ACK_N_FTS,
			dev->n_fts << 8);

	if (dev->shadow_en)
		dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
			readl_relaxed(dev->dm_core +
				PCIE20_ACK_F_ASPM_CTRL_REG);

	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));

	/* configure AUX clock frequency register for PCIe core */
	if (dev->use_19p2mhz_aux_clk)
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
	else
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);

	/* configure the completion timeout value for PCIe core */
	if (dev->cpl_timeout && dev->bridge_found)
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0xf, dev->cpl_timeout);

	/* Enable AER on RC */
	if (dev->aer_enable) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
				BIT(16)|BIT(17));
		msm_pcie_write_mask(dev->dm_core +  PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(3)|BIT(2)|BIT(1)|BIT(0));

		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
	}

	/* configure SMMU registers */
	if (dev->smmu_exist) {
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_SID_OFFSET, 0);

		/* one BDF-translate entry per enumerated device */
		if (dev->enumerated) {
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				if (dev->pcidev_table[i].dev &&
					dev->pcidev_table[i].short_bdf) {
					msm_pcie_write_reg(dev->parf,
						PCIE20_PARF_BDF_TRANSLATE_N +
						dev->pcidev_table[i].short_bdf
						* 4,
						dev->pcidev_table[i].bdf >> 16);
				}
			}
		}
	}
}
3816
/*
 * msm_pcie_config_link_state - enable link power-management features
 * @dev: root complex
 *
 * Walks the endpoint's capability list to locate its PCIe capability
 * (and, if L1SS is supported, the L1 substates extended capability),
 * then enables common clock, clock power management, ASPM L0s/L1 and
 * L1 substates on both RC and EP as configured for this RC.  Programmed
 * values are mirrored into the register shadows when shadowing is on.
 * Returns early if the EP lacks the needed capability structures or a
 * capability offset fails the alignment check.
 */
static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
{
	u32 val;
	u32 current_offset;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_cap_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	/* Enable the AUX Clock and the Core Clk to be synchronous for L1SS*/
	if (!dev->aux_clk_sync && dev->l1ss_supported)
		msm_pcie_write_mask(dev->parf +
				PCIE20_PARF_SYS_CTRL, BIT(3), 0);

	/* walk the EP's capability list for the PCIe capability */
	current_offset = readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

	while (current_offset) {
		if (msm_pcie_check_align(dev, current_offset))
			return;

		val = readl_relaxed(dev->conf + current_offset);
		if ((val & 0xff) == PCIE20_CAP_ID) {
			/* fixed offsets within the PCIe capability */
			ep_link_cap_offset = current_offset + 0x0c;
			ep_link_ctrlstts_offset = current_offset + 0x10;
			ep_dev_ctrl2stts2_offset = current_offset + 0x28;
			break;
		}
		current_offset = (val >> 8) & 0xff;
	}

	if (!ep_link_cap_offset) {
		PCIE_DBG(dev,
			"RC%d endpoint does not support PCIe capability registers\n",
			dev->rc_idx);
		return;
	}

	PCIE_DBG(dev,
		"RC%d: ep_link_cap_offset: 0x%x\n",
		dev->rc_idx, ep_link_cap_offset);

	/* common clock configuration (BIT(6)) on both RC and EP */
	if (dev->common_clk_en) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(6));

		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(6));

		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);

			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}

		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* clock power management (BIT(8)) if the EP advertises it (BIT(18)) */
	if (dev->clk_power_manage_en) {
		val = readl_relaxed(dev->conf + ep_link_cap_offset);
		if (val & BIT(18)) {
			msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
						0, BIT(8));

			if (dev->shadow_en)
				dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
					readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);

			PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset));
		}
	}

	/* ASPM L0s enable (BIT(0)) on both RC and EP */
	if (dev->l0s_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(0));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
						readl_relaxed(dev->dm_core +
						PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
						readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* ASPM L1 enable (BIT(1)) on both RC and EP */
	if (dev->l1_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(1));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
						readl_relaxed(dev->dm_core +
						PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
						readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	if (dev->l1ss_supported) {
		/* find the EP's L1 substates extended capability */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			if (msm_pcie_check_align(dev, current_offset))
				return;

			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset = current_offset + 0x4;
				ep_l1sub_ctrl1_offset = current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG(dev,
				"RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			return;
		}

		val = readl_relaxed(dev->conf + ep_l1sub_cap_reg1_offset);

		PCIE_DBG2(dev, "EP's L1SUB_CAPABILITY_REG_1: 0x%x\n", val);
		PCIE_DBG2(dev, "RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
				dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* enable only the substates the EP advertises (low 4 bits) */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core, PCIE20_L1SUB_CONTROL1,
					0xf, val);
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0, BIT(10));
		msm_pcie_write_reg_field(dev->conf, ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf + ep_dev_ctrl2stts2_offset,
					0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
					readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
					readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
					readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
					readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG2(dev, "RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1));
		PCIE_DBG2(dev, "RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG2(dev, "EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf + ep_l1sub_ctrl1_offset));
		PCIE_DBG2(dev, "EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
			ep_dev_ctrl2stts2_offset));
	}
}
4004
4005void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
4006{
4007 int i;
4008
4009 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4010
4011 /* program MSI controller and enable all interrupts */
4012 writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
4013 writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
4014
4015 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
4016 writel_relaxed(~0, dev->dm_core +
4017 PCIE20_MSI_CTRL_INTR_EN + (i * 12));
4018
4019 /* ensure that hardware is configured before proceeding */
4020 wmb();
4021}
4022
4023static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
4024 struct platform_device *pdev)
4025{
4026 int i, len, cnt, ret = 0, size = 0;
4027 struct msm_pcie_vreg_info_t *vreg_info;
4028 struct msm_pcie_gpio_info_t *gpio_info;
4029 struct msm_pcie_clk_info_t *clk_info;
4030 struct resource *res;
4031 struct msm_pcie_res_info_t *res_info;
4032 struct msm_pcie_irq_info_t *irq_info;
4033 struct msm_pcie_irq_info_t *msi_info;
4034 struct msm_pcie_reset_info_t *reset_info;
4035 struct msm_pcie_reset_info_t *pipe_reset_info;
4036 char prop_name[MAX_PROP_SIZE];
4037 const __be32 *prop;
4038 u32 *clkfreq = NULL;
4039
4040 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
4041
4042 cnt = of_property_count_strings((&pdev->dev)->of_node,
4043 "clock-names");
4044 if (cnt > 0) {
4045 clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
4046 sizeof(*clkfreq), GFP_KERNEL);
4047 if (!clkfreq) {
4048 PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
4049 dev->rc_idx);
4050 return -ENOMEM;
4051 }
4052 ret = of_property_read_u32_array(
4053 (&pdev->dev)->of_node,
4054 "max-clock-frequency-hz", clkfreq, cnt);
4055 if (ret) {
4056 PCIE_ERR(dev,
4057 "PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
4058 dev->rc_idx, ret);
4059 goto out;
4060 }
4061 }
4062
4063 for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
4064 vreg_info = &dev->vreg[i];
4065 vreg_info->hdl =
4066 devm_regulator_get(&pdev->dev, vreg_info->name);
4067
4068 if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
4069 PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
4070 vreg_info->name);
4071 ret = PTR_ERR(vreg_info->hdl);
4072 goto out;
4073 }
4074
4075 if (IS_ERR(vreg_info->hdl)) {
4076 if (vreg_info->required) {
4077 PCIE_DBG(dev, "Vreg %s doesn't exist\n",
4078 vreg_info->name);
4079 ret = PTR_ERR(vreg_info->hdl);
4080 goto out;
4081 } else {
4082 PCIE_DBG(dev,
4083 "Optional Vreg %s doesn't exist\n",
4084 vreg_info->name);
4085 vreg_info->hdl = NULL;
4086 }
4087 } else {
4088 dev->vreg_n++;
4089 snprintf(prop_name, MAX_PROP_SIZE,
4090 "qcom,%s-voltage-level", vreg_info->name);
4091 prop = of_get_property((&pdev->dev)->of_node,
4092 prop_name, &len);
4093 if (!prop || (len != (3 * sizeof(__be32)))) {
4094 PCIE_DBG(dev, "%s %s property\n",
4095 prop ? "invalid format" :
4096 "no", prop_name);
4097 } else {
4098 vreg_info->max_v = be32_to_cpup(&prop[0]);
4099 vreg_info->min_v = be32_to_cpup(&prop[1]);
4100 vreg_info->opt_mode =
4101 be32_to_cpup(&prop[2]);
4102 }
4103 }
4104 }
4105
4106 dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");
4107
4108 if (IS_ERR(dev->gdsc)) {
4109 PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
4110 dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
4111 if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
4112 PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
4113 dev->pdev->name);
4114 ret = PTR_ERR(dev->gdsc);
4115 goto out;
4116 }
4117
4118 dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");
4119
4120 if (IS_ERR(dev->gdsc_smmu)) {
4121 PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
4122 dev->rc_idx);
4123 dev->gdsc_smmu = NULL;
4124 }
4125
4126 dev->gpio_n = 0;
4127 for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
4128 gpio_info = &dev->gpio[i];
4129 ret = of_get_named_gpio((&pdev->dev)->of_node,
4130 gpio_info->name, 0);
4131 if (ret >= 0) {
4132 gpio_info->num = ret;
4133 dev->gpio_n++;
4134 PCIE_DBG(dev, "GPIO num for %s is %d\n",
4135 gpio_info->name, gpio_info->num);
4136 } else {
4137 if (gpio_info->required) {
4138 PCIE_ERR(dev,
4139 "Could not get required GPIO %s\n",
4140 gpio_info->name);
4141 goto out;
4142 } else {
4143 PCIE_DBG(dev,
4144 "Could not get optional GPIO %s\n",
4145 gpio_info->name);
4146 }
4147 }
4148 ret = 0;
4149 }
4150
4151 of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
4152 if (size) {
4153 dev->phy_sequence = (struct msm_pcie_phy_info_t *)
4154 devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
4155
4156 if (dev->phy_sequence) {
4157 dev->phy_len =
4158 size / sizeof(*dev->phy_sequence);
4159
4160 of_property_read_u32_array(pdev->dev.of_node,
4161 "qcom,phy-sequence",
4162 (unsigned int *)dev->phy_sequence,
4163 size / sizeof(dev->phy_sequence->offset));
4164 } else {
4165 PCIE_ERR(dev,
4166 "RC%d: Could not allocate memory for phy init sequence.\n",
4167 dev->rc_idx);
4168 ret = -ENOMEM;
4169 goto out;
4170 }
4171 } else {
4172 PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
4173 dev->rc_idx);
4174 }
4175
4176 of_get_property(pdev->dev.of_node, "qcom,port-phy-sequence", &size);
4177 if (size) {
4178 dev->port_phy_sequence = (struct msm_pcie_phy_info_t *)
4179 devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
4180
4181 if (dev->port_phy_sequence) {
4182 dev->port_phy_len =
4183 size / sizeof(*dev->port_phy_sequence);
4184
4185 of_property_read_u32_array(pdev->dev.of_node,
4186 "qcom,port-phy-sequence",
4187 (unsigned int *)dev->port_phy_sequence,
4188 size / sizeof(dev->port_phy_sequence->offset));
4189 } else {
4190 PCIE_ERR(dev,
4191 "RC%d: Could not allocate memory for port phy init sequence.\n",
4192 dev->rc_idx);
4193 ret = -ENOMEM;
4194 goto out;
4195 }
4196 } else {
4197 PCIE_DBG(dev, "RC%d: port phy sequence is not present in DT\n",
4198 dev->rc_idx);
4199 }
4200
4201 for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
4202 clk_info = &dev->clk[i];
4203
4204 clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
4205
4206 if (IS_ERR(clk_info->hdl)) {
4207 if (clk_info->required) {
4208 PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
4209 clk_info->name, PTR_ERR(clk_info->hdl));
4210 ret = PTR_ERR(clk_info->hdl);
4211 goto out;
4212 } else {
4213 PCIE_DBG(dev, "Ignoring Clock %s\n",
4214 clk_info->name);
4215 clk_info->hdl = NULL;
4216 }
4217 } else {
4218 if (clkfreq != NULL) {
4219 clk_info->freq = clkfreq[i +
4220 MSM_PCIE_MAX_PIPE_CLK];
4221 PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
4222 clk_info->name, clk_info->freq);
4223 }
4224 }
4225 }
4226
4227 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
4228 clk_info = &dev->pipeclk[i];
4229
4230 clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
4231
4232 if (IS_ERR(clk_info->hdl)) {
4233 if (clk_info->required) {
4234 PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
4235 clk_info->name, PTR_ERR(clk_info->hdl));
4236 ret = PTR_ERR(clk_info->hdl);
4237 goto out;
4238 } else {
4239 PCIE_DBG(dev, "Ignoring Clock %s\n",
4240 clk_info->name);
4241 clk_info->hdl = NULL;
4242 }
4243 } else {
4244 if (clkfreq != NULL) {
4245 clk_info->freq = clkfreq[i];
4246 PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
4247 clk_info->name, clk_info->freq);
4248 }
4249 }
4250 }
4251
4252 for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
4253 reset_info = &dev->reset[i];
4254
4255 reset_info->hdl = devm_reset_control_get(&pdev->dev,
4256 reset_info->name);
4257
4258 if (IS_ERR(reset_info->hdl)) {
4259 if (reset_info->required) {
4260 PCIE_DBG(dev,
4261 "Reset %s isn't available:%ld\n",
4262 reset_info->name,
4263 PTR_ERR(reset_info->hdl));
4264
4265 ret = PTR_ERR(reset_info->hdl);
4266 reset_info->hdl = NULL;
4267 goto out;
4268 } else {
4269 PCIE_DBG(dev, "Ignoring Reset %s\n",
4270 reset_info->name);
4271 reset_info->hdl = NULL;
4272 }
4273 }
4274 }
4275
4276 for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
4277 pipe_reset_info = &dev->pipe_reset[i];
4278
4279 pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
4280 pipe_reset_info->name);
4281
4282 if (IS_ERR(pipe_reset_info->hdl)) {
4283 if (pipe_reset_info->required) {
4284 PCIE_DBG(dev,
4285 "Pipe Reset %s isn't available:%ld\n",
4286 pipe_reset_info->name,
4287 PTR_ERR(pipe_reset_info->hdl));
4288
4289 ret = PTR_ERR(pipe_reset_info->hdl);
4290 pipe_reset_info->hdl = NULL;
4291 goto out;
4292 } else {
4293 PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
4294 pipe_reset_info->name);
4295 pipe_reset_info->hdl = NULL;
4296 }
4297 }
4298 }
4299
4300 dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
4301 if (!dev->bus_scale_table) {
4302 PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
4303 dev->rc_idx, dev->pdev->name);
4304 dev->bus_client = 0;
4305 } else {
4306 dev->bus_client =
4307 msm_bus_scale_register_client(dev->bus_scale_table);
4308 if (!dev->bus_client) {
4309 PCIE_ERR(dev,
4310 "PCIe: Failed to register bus client for RC%d (%s)\n",
4311 dev->rc_idx, dev->pdev->name);
4312 msm_bus_cl_clear_pdata(dev->bus_scale_table);
4313 ret = -ENODEV;
4314 goto out;
4315 }
4316 }
4317
4318 for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
4319 res_info = &dev->res[i];
4320
4321 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4322 res_info->name);
4323
4324 if (!res) {
4325 PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
4326 dev->rc_idx, res_info->name);
4327 } else {
4328 PCIE_DBG(dev, "start addr for %s is %pa.\n",
4329 res_info->name, &res->start);
4330
4331 res_info->base = devm_ioremap(&pdev->dev,
4332 res->start, resource_size(res));
4333 if (!res_info->base) {
4334 PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
4335 dev->rc_idx, res_info->name);
4336 ret = -ENOMEM;
4337 goto out;
4338 } else {
4339 res_info->resource = res;
4340 }
4341 }
4342 }
4343
4344 for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
4345 irq_info = &dev->irq[i];
4346
4347 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
4348 irq_info->name);
4349
4350 if (!res) {
4351 PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
4352 dev->rc_idx, irq_info->name);
4353 } else {
4354 irq_info->num = res->start;
4355 PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
4356 irq_info->num);
4357 }
4358 }
4359
4360 for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
4361 msi_info = &dev->msi[i];
4362
4363 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
4364 msi_info->name);
4365
4366 if (!res) {
4367 PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
4368 dev->rc_idx, msi_info->name);
4369 } else {
4370 msi_info->num = res->start;
4371 PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
4372 msi_info->num);
4373 }
4374 }
4375
4376 /* All allocations succeeded */
4377
4378 if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
4379 dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
4380 else
4381 dev->wake_n = 0;
4382
4383 dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
4384 dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
4385 dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
4386 dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
4387 dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
4388 dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
4389 dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
4390 dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
4391 dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
4392 dev->dev_io_res->flags = IORESOURCE_IO;
4393
4394out:
4395 kfree(clkfreq);
4396
4397 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
4398
4399 return ret;
4400}
4401
4402static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
4403{
4404 dev->parf = NULL;
4405 dev->elbi = NULL;
4406 dev->dm_core = NULL;
4407 dev->conf = NULL;
4408 dev->bars = NULL;
4409 dev->tcsr = NULL;
4410 dev->dev_mem_res = NULL;
4411 dev->dev_io_res = NULL;
4412}
4413
/*
 * msm_pcie_enable() - power up and train the PCIe link of one root complex.
 * @dev:     the root-complex device to enable
 * @options: bitmask (PM_VREG, PM_CLK, PM_PIPE_CLK) selecting which
 *           resource groups this call turns on
 *
 * Bring-up sequence: assert PERST to hold the endpoint in reset, enable
 * regulators and clocks, restore the secure configuration if required,
 * program the PARF registers, initialize the PHY (the common PHY is
 * initialized only by the first RC to come up, serialized by
 * com_phy_lock), release PERST, start link training and poll for
 * link-up.
 *
 * Returns 0 on success, a negative errno on resource/PHY failures, or
 * -1 if link training does not complete.  Serialized against
 * msm_pcie_disable() by dev->setup_lock.
 */
int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
{
	int ret = 0;
	uint32_t val;
	long int retries = 0;
	int link_check_count = 0;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	/* Nothing to do if the link is already up. */
	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
		PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
			dev->rc_idx);
		goto out;
	}

	/* assert PCIe reset link to keep EP in reset */

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
				PERST_PROPAGATION_DELAY_US_MAX);

	/* enable power */

	if (options & PM_VREG) {
		ret = msm_pcie_vreg_init(dev);
		if (ret)
			goto out;
	}

	/* enable clocks */
	if (options & PM_CLK) {
		ret = msm_pcie_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto clk_fail;
	}

	/* re-apply the secure (SCM) configuration lost on power collapse */
	if (dev->scm_dev_id) {
		PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
		msm_pcie_restore_sec_config(dev);
	}

	/* enable PCIe clocks and resets */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);

	/* change DBI base address */
	writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
				0, BIT(4));

	/* enable selected IRQ */
	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
		/* clear the mask first, then unmask the events we handle */
		msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);

		msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
					BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
					BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
					BIT(MSM_PCIE_INT_EVT_AER_ERR) |
					BIT(MSM_PCIE_INT_EVT_MSI_0) |
					BIT(MSM_PCIE_INT_EVT_MSI_1) |
					BIT(MSM_PCIE_INT_EVT_MSI_2) |
					BIT(MSM_PCIE_INT_EVT_MSI_3) |
					BIT(MSM_PCIE_INT_EVT_MSI_4) |
					BIT(MSM_PCIE_INT_EVT_MSI_5) |
					BIT(MSM_PCIE_INT_EVT_MSI_6) |
					BIT(MSM_PCIE_INT_EVT_MSI_7));

		PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
	}

	/* size the slave address space to cover the device memory window */
	if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_16M)
		writel_relaxed(SZ_32M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_8M)
		writel_relaxed(SZ_16M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else
		writel_relaxed(SZ_8M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);

	if (dev->use_msi) {
		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
		/* use the DT-provided halt size if set, else keep current */
		val = dev->wr_halt_size ? dev->wr_halt_size :
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
			BIT(31) | val);

		PCIE_DBG(dev,
			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
			dev->rc_idx,
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
	}

	mutex_lock(&com_phy_lock);
	/* init PCIe PHY */
	if (!num_rc_on)
		pcie_phy_init(dev);

	num_rc_on++;
	mutex_unlock(&com_phy_lock);

	if (options & PM_PIPE_CLK) {
		usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
					PHY_STABILIZATION_DELAY_US_MAX);
		/* Enable the pipe clock */
		ret = msm_pcie_pipe_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto link_fail;
	}

	PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);

	/* poll the PHY up to PHY_READY_TIMEOUT_COUNT times */
	do {
		if (pcie_phy_is_ready(dev))
			break;
		retries++;
		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
					REFCLK_STABILIZATION_DELAY_US_MAX);
	} while (retries < PHY_READY_TIMEOUT_COUNT);

	PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
		dev->rc_idx, retries);

	if (pcie_phy_is_ready(dev))
		PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
	else {
		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
			dev->rc_idx);
		ret = -ENODEV;
		pcie_phy_dump(dev);
		goto link_fail;
	}

	pcie_pcs_port_phy_init(dev);

	/* honor the endpoint's power-up latency before releasing reset */
	if (dev->ep_latency)
		usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				dev->gpio[MSM_PCIE_GPIO_EP].on);

	/* de-assert PCIe reset link to bring EP out of reset */

	PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);

	/* set max tlp read size */
	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
				0x7000, dev->tlp_rd_size);

	/* enable link training */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));

	PCIE_DBG(dev, "%s", "check if link is up\n");

	/* Wait for up to 100ms for the link to come up */
	do {
		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
		val =  readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
	} while ((!(val & XMLH_LINK_UP) ||
		!msm_pcie_confirm_linkup(dev, false, false, NULL))
		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));

	if ((val & XMLH_LINK_UP) &&
		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
		PCIE_DBG(dev, "Link is up after %d checkings\n",
			link_check_count);
		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
	} else {
		/* training failed: put the endpoint back into reset */
		PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
			dev->rc_idx);
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
			dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
			dev->rc_idx);
		ret = -1;
		goto link_fail;
	}

	msm_pcie_config_controller(dev);

	if (!dev->msi_gicm_addr)
		msm_pcie_config_msi_controller(dev);

	msm_pcie_config_link_state(dev);

	dev->link_status = MSM_PCIE_LINK_ENABLED;
	dev->power_on = true;
	dev->suspending = false;
	dev->link_turned_on_counter++;

	goto out;

link_fail:
	/* undo PHY/clock bring-up; power down the common PHY if last RC */
	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	msm_pcie_pipe_clk_deinit(dev);
	msm_pcie_clk_deinit(dev);
clk_fail:
	msm_pcie_vreg_deinit(dev);
out:
	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
4658
/*
 * msm_pcie_disable() - power down the PCIe link of one root complex.
 * @dev:     the root-complex device to disable
 * @options: bitmask (PM_VREG, PM_CLK, PM_PIPE_CLK) selecting which
 *           resource groups this call turns off
 *
 * Asserts PERST to put the endpoint into reset, powers down the
 * per-port PHY (and the common PHY when this is the last RC, guarded
 * by com_phy_lock), then turns off the selected clocks and regulators.
 * A no-op if the link is already powered down.  Serialized against
 * msm_pcie_enable() by dev->setup_lock.
 */
void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
{
	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (!dev->power_on) {
		PCIE_DBG(dev,
			"PCIe: the link of RC%d is already power down.\n",
			dev->rc_idx);
		mutex_unlock(&dev->setup_lock);
		return;
	}

	/* mark the link down before touching the hardware */
	dev->link_status = MSM_PCIE_LINK_DISABLED;
	dev->power_on = false;
	dev->link_turned_off_counter++;

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);

	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);

	/* reset and power down this RC's port PHY */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	mutex_lock(&com_phy_lock);
	num_rc_on--;
	/* the last RC going down also powers off the shared PHY */
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	if (options & PM_CLK) {
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
					BIT(0));
		msm_pcie_clk_deinit(dev);
	}

	if (options & PM_VREG)
		msm_pcie_vreg_deinit(dev);

	if (options & PM_PIPE_CLK)
		msm_pcie_pipe_clk_deinit(dev);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
4718
4719static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
4720 struct msm_pcie_device_info *ep_dev_info)
4721{
4722 u32 val;
4723 void __iomem *ep_base = ep_dev_info->conf_base;
4724 u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
4725 0xff;
4726
4727 while (current_offset) {
4728 if (msm_pcie_check_align(dev, current_offset))
4729 return;
4730
4731 val = readl_relaxed(ep_base + current_offset);
4732 if ((val & 0xff) == PCIE20_CAP_ID) {
4733 ep_dev_info->dev_ctrlstts_offset =
4734 current_offset + 0x8;
4735 break;
4736 }
4737 current_offset = (val >> 8) & 0xff;
4738 }
4739
4740 if (!ep_dev_info->dev_ctrlstts_offset) {
4741 PCIE_DBG(dev,
4742 "RC%d endpoint does not support PCIe cap registers\n",
4743 dev->rc_idx);
4744 return;
4745 }
4746
4747 PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
4748 dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
4749
4750 /* Enable AER on EP */
4751 msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
4752 BIT(3)|BIT(2)|BIT(1)|BIT(0));
4753
4754 PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
4755 readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
4756}
4757
/*
 * msm_pcie_config_device_table() - bus_for_each_dev() callback that
 * records a discovered PCI device in the global and per-RC device
 * tables.
 * @dev:  the struct device being visited (a PCI device)
 * @pdev: the owning struct msm_pcie_dev_t, passed as void *
 *
 * For every device below the root bus whose BDF matches a
 * pre-allocated slot: caches its config-space virtual base and
 * physical address, programs an iATU window for config access,
 * enables memory space and bus mastering on bridges, counts endpoints,
 * and configures AER on the matched device.
 *
 * Returns 0 on success, -ENODEV when no pci_dev is attached, or a
 * positive table index when the per-RC or global table is full.
 */
static int msm_pcie_config_device_table(struct device *dev, void *pdev)
{
	struct pci_dev *pcidev = to_pci_dev(dev);
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
	int ret = 0;
	u32 rc_idx = pcie_dev->rc_idx;
	u32 i, index;
	u32 bdf = 0;
	u8 type;
	u32 h_type;
	u32 bme;

	if (!pcidev) {
		PCIE_ERR(pcie_dev,
			"PCIe: Did not find PCI device in list for RC%d.\n",
			pcie_dev->rc_idx);
		return -ENODEV;
	}

	PCIE_DBG(pcie_dev,
		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
		pcidev->vendor, pcidev->device);

	/* the root bus (bus 0) itself needs no table entry */
	if (!pcidev->bus->number)
		return ret;

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
	/* bus 1 is directly attached: type-0 config cycles; deeper buses
	 * are reached through a bridge and need type-1 cycles.
	 */
	type = pcidev->bus->number == 1 ?
		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	/* find a free global slot pre-keyed with this BDF */
	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
		if (msm_pcie_dev_tbl[i].bdf == bdf &&
			!msm_pcie_dev_tbl[i].dev) {
			/* find the matching per-RC slot for the same BDF */
			for (index = 0; index < MAX_DEVICE_NUM; index++) {
				if (dev_table_t[index].bdf == bdf) {
					msm_pcie_dev_tbl[i].dev = pcidev;
					msm_pcie_dev_tbl[i].domain = rc_idx;
					msm_pcie_dev_tbl[i].conf_base =
						pcie_dev->conf + index * SZ_4K;
					msm_pcie_dev_tbl[i].phy_address =
						axi_conf->start + index * SZ_4K;

					dev_table_t[index].dev = pcidev;
					dev_table_t[index].domain = rc_idx;
					dev_table_t[index].conf_base =
						pcie_dev->conf + index * SZ_4K;
					dev_table_t[index].phy_address =
						axi_conf->start + index * SZ_4K;

					/* map a 4K iATU window so this BDF's
					 * config space is CPU-accessible
					 */
					msm_pcie_iatu_config(pcie_dev, index,
						type,
						dev_table_t[index].phy_address,
						dev_table_t[index].phy_address
						+ SZ_4K - 1,
						bdf);

					h_type = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_HEADER_TYPE);

					bme = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_COMMAND_STATUS);

					/* bit 16 of this dword is bit 0 of the
					 * header-type byte: set => type-1
					 * header (a bridge); enable memory
					 * space + bus master on bridges,
					 * otherwise count it as an endpoint
					 */
					if (h_type & (1 << 16)) {
						pci_write_config_dword(pcidev,
							PCIE20_COMMAND_STATUS,
							bme | 0x06);
					} else {
						pcie_dev->num_ep++;
						dev_table_t[index].registered =
							false;
					}

					if (pcie_dev->num_ep > 1)
						pcie_dev->pending_ep_reg = true;

					msm_pcie_config_ep_aer(pcie_dev,
						&dev_table_t[index]);

					break;
				}
			}
			if (index == MAX_DEVICE_NUM) {
				PCIE_ERR(pcie_dev,
					"RC%d PCI device table is full.\n",
					rc_idx);
				ret = index;
			} else {
				break;
			}
		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
			pcidev == msm_pcie_dev_tbl[i].dev) {
			/* this device was already recorded; nothing to do */
			break;
		}
	}
	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"Global PCI device table is full: %d elements.\n",
			i);
		PCIE_ERR(pcie_dev,
			"Bus number is 0x%x\nDevice number is 0x%x\n",
			pcidev->bus->number, pcidev->devfn);
		ret = i;
	}
	return ret;
}
4867
/*
 * msm_pcie_configure_sid() - assign an SMMU stream ID to a PCIe
 * endpoint.
 * @dev:    the endpoint's struct device
 * @sid:    out: SID built from smmu_sid_base, the RC index and the
 *          next free per-RC "short BDF"
 * @domain: out: index of the root complex that owns the endpoint
 *
 * Walks up to the root bus to find the owning RC, allocates the next
 * short BDF, programs PCIE20_PARF_BDF_TRANSLATE_N so the hardware
 * translates the endpoint's full BDF to the short BDF, and records the
 * SID in the per-RC device table.
 *
 * Returns 0 on success or MSM_PCIE_ERROR on failure (NULL inputs, no
 * SMMU, short-BDF pool exhausted, or the BDF is not in the table).
 */
int msm_pcie_configure_sid(struct device *dev, u32 *sid, int *domain)
{
	struct pci_dev *pcidev;
	struct msm_pcie_dev_t *pcie_dev;
	struct pci_bus *bus;
	int i;
	u32 bdf;

	if (!dev) {
		pr_err("%s: PCIe: endpoint device passed in is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	pcidev = to_pci_dev(dev);
	if (!pcidev) {
		pr_err("%s: PCIe: PCI device of endpoint is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	bus = pcidev->bus;
	if (!bus) {
		pr_err("%s: PCIe: Bus of PCI device is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	/* climb to the root bus; its sysdata is the owning RC */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	pcie_dev = (struct msm_pcie_dev_t *)(bus->sysdata);
	if (!pcie_dev) {
		pr_err("%s: PCIe: Could not get PCIe structure\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	if (!pcie_dev->smmu_exist) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC:%d: smmu does not exist\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev, "PCIe: RC%d: device address is: %p\n",
		pcie_dev->rc_idx, dev);
	PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device address is: %p\n",
		pcie_dev->rc_idx, pcidev);

	*domain = pcie_dev->rc_idx;

	/* claim the next short BDF from the per-RC pool */
	if (pcie_dev->current_short_bdf < (MAX_SHORT_BDF_NUM - 1)) {
		pcie_dev->current_short_bdf++;
	} else {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: No more short BDF left\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (pcie_dev->pcidev_table[i].bdf == bdf) {
			*sid = pcie_dev->smmu_sid_base +
				((pcie_dev->rc_idx << 4) |
				pcie_dev->current_short_bdf);

			/* program the hardware BDF -> short-BDF mapping */
			msm_pcie_write_reg(pcie_dev->parf,
				PCIE20_PARF_BDF_TRANSLATE_N +
				pcie_dev->current_short_bdf * 4,
				bdf >> 16);

			pcie_dev->pcidev_table[i].sid = *sid;
			pcie_dev->pcidev_table[i].short_bdf =
				pcie_dev->current_short_bdf;
			break;
		}
	}

	if (i == MAX_DEVICE_NUM) {
		/* unknown BDF: return the short BDF to the pool */
		pcie_dev->current_short_bdf--;
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d could not find BDF:%d\n",
			pcie_dev->rc_idx, bdf);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev,
		"PCIe: RC%d: Device: %02x:%02x.%01x received SID %d\n",
		pcie_dev->rc_idx,
		bdf >> 24,
		bdf >> 19 & 0x1f,
		bdf >> 16 & 0x07,
		*sid);

	return 0;
}
EXPORT_SYMBOL(msm_pcie_configure_sid);
4967EXPORT_SYMBOL(msm_pcie_configure_sid);
4968
4969int msm_pcie_enumerate(u32 rc_idx)
4970{
4971 int ret = 0, bus_ret = 0, scan_ret = 0;
4972 struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
4973
4974 mutex_lock(&dev->enumerate_lock);
4975
4976 PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
4977
4978 if (!dev->drv_ready) {
4979 PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
4980 rc_idx);
4981 ret = -EPROBE_DEFER;
4982 goto out;
4983 }
4984
4985 if (!dev->enumerated) {
4986 ret = msm_pcie_enable(dev, PM_ALL);
4987
4988 /* kick start ARM PCI configuration framework */
4989 if (!ret) {
4990 struct pci_dev *pcidev = NULL;
4991 bool found = false;
4992 struct pci_bus *bus;
4993 resource_size_t iobase = 0;
4994 u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
4995 u32 vendor_id = ids & 0xffff;
4996 u32 device_id = (ids & 0xffff0000) >> 16;
4997 LIST_HEAD(res);
4998
4999 PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
5000 vendor_id, device_id);
5001
5002 ret = of_pci_get_host_bridge_resources(
5003 dev->pdev->dev.of_node,
5004 0, 0xff, &res, &iobase);
5005 if (ret) {
5006 PCIE_ERR(dev,
5007 "PCIe: failed to get host bridge resources for RC%d: %d\n",
5008 dev->rc_idx, ret);
5009 goto out;
5010 }
5011
5012 bus = pci_create_root_bus(&dev->pdev->dev, 0,
5013 &msm_pcie_ops,
5014 msm_pcie_setup_sys_data(dev),
5015 &res);
5016 if (!bus) {
5017 PCIE_ERR(dev,
5018 "PCIe: failed to create root bus for RC%d\n",
5019 dev->rc_idx);
5020 ret = -ENOMEM;
5021 goto out;
5022 }
5023
5024 scan_ret = pci_scan_child_bus(bus);
5025 PCIE_DBG(dev,
5026 "PCIe: RC%d: The max subordinate bus number discovered is %d\n",
5027 dev->rc_idx, ret);
5028
5029 msm_pcie_fixup_irqs(dev);
5030 pci_assign_unassigned_bus_resources(bus);
5031 pci_bus_add_devices(bus);
5032
5033 dev->enumerated = true;
5034
5035 msm_pcie_write_mask(dev->dm_core +
5036 PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
5037
5038 if (dev->cpl_timeout && dev->bridge_found)
5039 msm_pcie_write_reg_field(dev->dm_core,
5040 PCIE20_DEVICE_CONTROL2_STATUS2,
5041 0xf, dev->cpl_timeout);
5042
5043 if (dev->shadow_en) {
5044 u32 val = readl_relaxed(dev->dm_core +
5045 PCIE20_COMMAND_STATUS);
5046 PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
5047 val);
5048 dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
5049 }
5050
5051 do {
5052 pcidev = pci_get_device(vendor_id,
5053 device_id, pcidev);
5054 if (pcidev && (&msm_pcie_dev[rc_idx] ==
5055 (struct msm_pcie_dev_t *)
5056 PCIE_BUS_PRIV_DATA(pcidev->bus))) {
5057 msm_pcie_dev[rc_idx].dev = pcidev;
5058 found = true;
5059 PCIE_DBG(&msm_pcie_dev[rc_idx],
5060 "PCI device is found for RC%d\n",
5061 rc_idx);
5062 }
5063 } while (!found && pcidev);
5064
5065 if (!pcidev) {
5066 PCIE_ERR(dev,
5067 "PCIe: Did not find PCI device for RC%d.\n",
5068 dev->rc_idx);
5069 ret = -ENODEV;
5070 goto out;
5071 }
5072
5073 bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
5074 &msm_pcie_config_device_table);
5075
5076 if (bus_ret) {
5077 PCIE_ERR(dev,
5078 "PCIe: Failed to set up device table for RC%d\n",
5079 dev->rc_idx);
5080 ret = -ENODEV;
5081 goto out;
5082 }
5083 } else {
5084 PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
5085 dev->rc_idx);
5086 }
5087 } else {
5088 PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
5089 dev->rc_idx);
5090 }
5091
5092out:
5093 mutex_unlock(&dev->enumerate_lock);
5094
5095 return ret;
5096}
5097EXPORT_SYMBOL(msm_pcie_enumerate);
5098
5099static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
5100 enum msm_pcie_event event)
5101{
5102 if (dev->event_reg && dev->event_reg->callback &&
5103 (dev->event_reg->events & event)) {
5104 struct msm_pcie_notify *notify = &dev->event_reg->notify;
5105
5106 notify->event = event;
5107 notify->user = dev->event_reg->user;
5108 PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
5109 dev->rc_idx, event);
5110 dev->event_reg->callback(notify);
5111
5112 if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
5113 (event == MSM_PCIE_EVENT_LINKDOWN)) {
5114 dev->user_suspend = true;
5115 PCIE_DBG(dev,
5116 "PCIe: Client of RC%d will recover the link later.\n",
5117 dev->rc_idx);
5118 return;
5119 }
5120 } else {
5121 PCIE_DBG2(dev,
5122 "PCIe: Client of RC%d does not have registration for event %d\n",
5123 dev->rc_idx, event);
5124 }
5125}
5126
/*
 * handle_wake_func() - workqueue handler run after the endpoint raises
 * a wake request (handle_wake_work).
 *
 * If the RC has not been enumerated yet, enumerate it now and, on
 * success, deliver MSM_PCIE_EVENT_LINKUP callbacks to registered
 * clients: when num_ep > 1 the per-endpoint registrations from
 * pcidev_table are used (note dev->event_reg is repointed to each
 * entry in turn), otherwise the single RC-level registration.  A wake
 * on an already-enumerated RC is logged and ignored.  Serialized by
 * dev->recovery_lock.
 */
static void handle_wake_func(struct work_struct *work)
{
	int i, ret;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
					handle_wake_work);

	PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);

	mutex_lock(&dev->recovery_lock);

	if (!dev->enumerated) {
		PCIE_DBG(dev,
			"PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
			dev->rc_idx);

		ret = msm_pcie_enumerate(dev->rc_idx);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: failed to enable RC%d upon wake request from the device.\n",
				dev->rc_idx);
			goto out;
		}

		if (dev->num_ep > 1) {
			/* notify each endpoint's registered client in turn */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg = dev->pcidev_table[i].event_reg;

				if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
					&& dev->event_reg &&
					dev->event_reg->callback &&
					(dev->event_reg->events &
					MSM_PCIE_EVENT_LINKUP)) {
					struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
					notify->event = MSM_PCIE_EVENT_LINKUP;
					notify->user = dev->event_reg->user;
					PCIE_DBG(dev,
						"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
						dev->rc_idx);
					dev->event_reg->callback(notify);
				}
			}
		} else {
			/* single endpoint: use the RC-level registration */
			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
				dev->event_reg && dev->event_reg->callback &&
				(dev->event_reg->events &
				MSM_PCIE_EVENT_LINKUP)) {
				struct msm_pcie_notify *notify =
					&dev->event_reg->notify;
				notify->event = MSM_PCIE_EVENT_LINKUP;
				notify->user = dev->event_reg->user;
				PCIE_DBG(dev,
					"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
					dev->rc_idx);
				dev->event_reg->callback(notify);
			} else {
				PCIE_DBG(dev,
					"PCIe: Client of RC%d does not have registration for linkup event.\n",
					dev->rc_idx);
			}
		}
		goto out;
	} else {
		PCIE_ERR(dev,
			"PCIe: The enumeration for RC%d has already been done.\n",
			dev->rc_idx);
		goto out;
	}

out:
	mutex_unlock(&dev->recovery_lock);
}
5199
/*
 * handle_aer_irq() - ISR for PCIe Advanced Error Reporting (AER) events.
 *
 * Reads the root complex's AER correctable/uncorrectable status, root
 * error status, and Device Control/Status registers, bumps the per-RC
 * error counters, then uses the AER error source ID register to locate
 * the reporting endpoint(s) and repeats the accounting for them.  All
 * handled status bits are cleared by writing them back (presumably
 * write-1-to-clear semantics -- confirm against the controller spec).
 * Runs with dev->aer_lock held and local IRQs disabled.
 */
static irqreturn_t handle_aer_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
	int ep_corr_val = 0, ep_uncorr_val = 0;
	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
	u32 ep_dev_ctrlstts_offset = 0;
	int i, j, ep_src_bdf = 0;
	void __iomem *ep_base = NULL;
	unsigned long irqsave_flags;

	PCIE_DBG2(dev,
		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
		dev->rc_idx, irq, dev->rc_corr_counter,
		dev->rc_non_fatal_counter, dev->rc_fatal_counter,
		dev->ep_corr_counter, dev->ep_non_fatal_counter,
		dev->ep_fatal_counter);

	spin_lock_irqsave(&dev->aer_lock, irqsave_flags);

	/* skip all processing while the link is being suspended */
	if (dev->suspending) {
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently suspending.\n",
			dev->rc_idx);
		spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
		return IRQ_HANDLED;
	}

	/* snapshot the RC-side AER and device status registers */
	uncorr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_UNCORR_ERR_STATUS_REG);
	corr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_CORR_ERR_STATUS_REG);
	rc_err_status = readl_relaxed(dev->dm_core +
				PCIE20_AER_ROOT_ERR_STATUS_REG);
	rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
				PCIE20_CAP_DEVCTRLSTATUS);

	if (uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				uncorr_val);
	/* rate-limit correctable-error logging via corr_counter_limit */
	if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
		PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				corr_val);

	/* Device Status bits 18/17/16 map to fatal/non-fatal/correctable */
	if ((rc_dev_ctrlstts >> 18) & 0x1)
		dev->rc_fatal_counter++;
	if ((rc_dev_ctrlstts >> 17) & 0x1)
		dev->rc_non_fatal_counter++;
	if ((rc_dev_ctrlstts >> 16) & 0x1)
		dev->rc_corr_counter++;

	/* ack the three error-detected bits on the RC */
	msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(18)|BIT(17)|BIT(16));

	if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
		PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
		goto out;
	}

	/*
	 * Pass 0 handles the correctable error source (low halfword of
	 * the source ID register), pass 1 the uncorrectable one (high
	 * halfword); either is matched against the cached BDF table.
	 */
	for (i = 0; i < 2; i++) {
		if (i)
			ep_src_bdf = readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
		else
			ep_src_bdf = (readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;

		if (!ep_src_bdf)
			continue;

		for (j = 0; j < MAX_DEVICE_NUM; j++) {
			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
				PCIE_DBG2(dev,
					"PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
					i ? "Uncorrectable" : "Correctable",
					dev->pcidev_table[j].bdf >> 24,
					dev->pcidev_table[j].bdf >> 19 & 0x1f,
					dev->pcidev_table[j].bdf >> 16 & 0x07);
				ep_base = dev->pcidev_table[j].conf_base;
				ep_dev_ctrlstts_offset = dev->
					pcidev_table[j].dev_ctrlstts_offset;
				break;
			}
		}

		/*
		 * NOTE(review): ep_base is not reset between passes, so if
		 * pass 1 finds no table match it reuses pass 0's endpoint --
		 * confirm whether that carry-over is intentional.
		 */
		if (!ep_base) {
			PCIE_ERR(dev,
				"PCIe: RC%d no endpoint found for reported error\n",
				dev->rc_idx);
			goto out;
		}

		ep_uncorr_val = readl_relaxed(ep_base +
					PCIE20_AER_UNCORR_ERR_STATUS_REG);
		ep_corr_val = readl_relaxed(ep_base +
					PCIE20_AER_CORR_ERR_STATUS_REG);
		ep_dev_ctrlstts = readl_relaxed(ep_base +
					ep_dev_ctrlstts_offset);

		if (ep_uncorr_val)
			PCIE_DBG(dev,
				"EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				ep_uncorr_val);
		if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
			PCIE_DBG(dev,
				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				ep_corr_val);

		if ((ep_dev_ctrlstts >> 18) & 0x1)
			dev->ep_fatal_counter++;
		if ((ep_dev_ctrlstts >> 17) & 0x1)
			dev->ep_non_fatal_counter++;
		if ((ep_dev_ctrlstts >> 16) & 0x1)
			dev->ep_corr_counter++;

		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
					BIT(18)|BIT(17)|BIT(16));

		/* clear the EP's AER status bits (masks match the RC's) */
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_UNCORR_ERR_STATUS_REG,
				0x3fff031, 0x3fff031);
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_CORR_ERR_STATUS_REG,
				0xf1c1, 0xf1c1);
	}
out:
	if (((dev->rc_corr_counter < corr_counter_limit) &&
		(dev->ep_corr_counter < corr_counter_limit)) ||
		uncorr_val || ep_uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
				rc_err_status);
	/* finally clear the RC's own AER status registers */
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_UNCORR_ERR_STATUS_REG,
			0x3fff031, 0x3fff031);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_CORR_ERR_STATUS_REG,
			0xf1c1, 0xf1c1);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_ROOT_ERR_STATUS_REG,
			0x7f, 0x7f);

	spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
	return IRQ_HANDLED;
}
5345
5346static irqreturn_t handle_wake_irq(int irq, void *data)
5347{
5348 struct msm_pcie_dev_t *dev = data;
5349 unsigned long irqsave_flags;
5350 int i;
5351
5352 spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);
5353
5354 dev->wake_counter++;
5355 PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
5356 dev->wake_counter, dev->rc_idx);
5357
5358 PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
5359 dev->rc_idx);
5360
5361 if (!dev->enumerated) {
5362 PCIE_DBG(dev, "Start enumeating RC%d\n", dev->rc_idx);
5363 if (dev->ep_wakeirq)
5364 schedule_work(&dev->handle_wake_work);
5365 else
5366 PCIE_DBG(dev,
5367 "wake irq is received but ep_wakeirq is not supported for RC%d.\n",
5368 dev->rc_idx);
5369 } else {
5370 PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
5371 __pm_stay_awake(&dev->ws);
5372 __pm_relax(&dev->ws);
5373
5374 if (dev->num_ep > 1) {
5375 for (i = 0; i < MAX_DEVICE_NUM; i++) {
5376 dev->event_reg =
5377 dev->pcidev_table[i].event_reg;
5378 msm_pcie_notify_client(dev,
5379 MSM_PCIE_EVENT_WAKEUP);
5380 }
5381 } else {
5382 msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
5383 }
5384 }
5385
5386 spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);
5387
5388 return IRQ_HANDLED;
5389}
5390
/*
 * handle_linkdown_irq() - ISR for the PCIe link-down interrupt.
 *
 * Ignores the event if the link was never enabled or is suspending.
 * Otherwise marks the link disabled, stops config-space shadowing,
 * optionally panics (debug knob), asserts PERST to hold the endpoint
 * in reset, and notifies registered clients with
 * MSM_PCIE_EVENT_LINKDOWN.  Runs with dev->linkdown_lock held and
 * local IRQs disabled.
 */
static irqreturn_t handle_linkdown_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);

	dev->linkdown_counter++;

	PCIE_DBG(dev,
		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
		dev->linkdown_counter, dev->rc_idx);

	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG(dev,
			"PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
			dev->rc_idx);
	} else if (dev->suspending) {
		PCIE_DBG(dev,
			"PCIe:the link of RC%d is suspending.\n",
			dev->rc_idx);
	} else {
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		/* stop shadowing config writes; state is no longer valid */
		dev->shadow_en = false;

		/* user-selectable debug behavior */
		if (dev->linkdown_panic)
			panic("User has chosen to panic on linkdown\n");

		/* assert PERST */
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

		/* with multiple EPs, notify each per-device registration */
		if (dev->num_ep > 1) {
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_LINKDOWN);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
		}
	}

	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);

	return IRQ_HANDLED;
}
5441
/*
 * handle_msi_irq() - ISR demultiplexing the controller's MSI lines.
 *
 * Scans every MSI controller status register, acknowledges each
 * pending bit by writing it back, and dispatches the corresponding
 * virtual IRQ from the MSI irq_domain.  Re-reads the status register
 * after each dispatch so bits raised mid-loop are also serviced.
 */
static irqreturn_t handle_msi_irq(int irq, void *data)
{
	int i, j;
	unsigned long val;
	struct msm_pcie_dev_t *dev = data;
	void __iomem *ctrl_status;

	PCIE_DUMP(dev, "irq: %d\n", irq);

	/*
	 * check for set bits, clear it by setting that bit
	 * and trigger corresponding irq
	 */
	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
		/* status registers are laid out 12 bytes apart */
		ctrl_status = dev->dm_core +
				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);

		val = readl_relaxed(ctrl_status);
		while (val) {
			j = find_first_bit(&val, 32);
			writel_relaxed(BIT(j), ctrl_status);
			/* ensure that interrupt is cleared (acked) */
			wmb();
			/* each status register covers 32 MSI vectors */
			generic_handle_irq(
			   irq_find_mapping(dev->irq_domain, (j + (32*i)))
			   );
			val = readl_relaxed(ctrl_status);
		}
	}

	return IRQ_HANDLED;
}
5474
/*
 * handle_global_irq() - ISR for the PARF "global" interrupt line.
 *
 * Reads the masked pending status from PARF, clears it, and then
 * dispatches each set bit to the matching sub-handler (link-down or
 * AER).  Unknown bits are only logged.  Runs with
 * dev->global_irq_lock held and local IRQs disabled.
 */
static irqreturn_t handle_global_irq(int irq, void *data)
{
	int i;
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	u32 status = 0;

	spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);

	/* only consider events that are not masked off */
	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);

	/* ack everything we are about to handle */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);

	PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
		dev->rc_idx, irq, status);

	for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
		if (status & BIT(i)) {
			switch (i) {
			case MSM_PCIE_INT_EVT_LINK_DOWN:
				PCIE_DBG(dev,
					"PCIe: RC%d: handle linkdown event.\n",
					dev->rc_idx);
				handle_linkdown_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_LEGACY:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER legacy event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_ERR:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			default:
				PCIE_ERR(dev,
					"PCIe: RC%d: Unexpected event %d is caught!\n",
					dev->rc_idx, i);
			}
		}
	}

	spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);

	return IRQ_HANDLED;
}
5525
/*
 * msm_pcie_destroy_irq() - release an MSI vector back to the pool.
 * @irq: the Linux IRQ number being torn down
 * @pcie_dev: owning RC, or NULL to recover it from the IRQ chip data
 *
 * Works for both MSI schemes: QGIC-based vectors are located by
 * scanning dev->msi[], default (irq_domain) vectors by offset from the
 * domain's first mapping.  The matching bit in msi_irq_in_use is then
 * cleared so the slot can be reallocated.
 */
void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev)
{
	int pos, i;
	struct msm_pcie_dev_t *dev;

	if (pcie_dev)
		dev = pcie_dev;
	else
		dev = irq_get_chip_data(irq);

	if (!dev) {
		pr_err("PCIe: device is null. IRQ:%d\n", irq);
		return;
	}

	if (dev->msi_gicm_addr) {
		PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);

		/* QGIC: find which MSI table slot owns this irq number */
		for (i = 0; i < MSM_PCIE_MAX_MSI; i++)
			if (irq == dev->msi[i].num)
				break;
		if (i == MSM_PCIE_MAX_MSI) {
			PCIE_ERR(dev,
				"Could not find irq: %d in RC%d MSI table\n",
				irq, dev->rc_idx);
			return;
		}

		pos = i;
	} else {
		PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
		/* default: slot index is the offset within the irq_domain */
		pos = irq - irq_find_mapping(dev->irq_domain, 0);
	}

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
		pos, *dev->msi_irq_in_use);
	clear_bit(pos, dev->msi_irq_in_use);
	PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
		pos, *dev->msi_irq_in_use);
}
5568
/*
 * Hooks into the Linux PCI MSI framework: the core calls this arch
 * override to free a single MSI vector; delegate to the driver's
 * common teardown (RC is recovered from the IRQ chip data).
 */
void arch_teardown_msi_irq(unsigned int irq)
{
	PCIE_GEN_DBG("irq %d deallocated\n", irq);
	msm_pcie_destroy_irq(irq, NULL);
}
5575
/*
 * arch_teardown_msi_irqs() - PCI-core override to free all of a
 * device's MSI vectors.
 *
 * Marks the RC as no longer using MSI, then walks the device's MSI
 * descriptor list and destroys every allocated vector (each entry may
 * span 2^multiple consecutive vectors).
 */
void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
		pcie_dev->rc_idx, dev->vendor, dev->device);

	pcie_dev->use_msi = false;

	list_for_each_entry(entry, &dev->dev.msi_list, list) {
		int i, nvec;

		/* entries never set up have irq == 0 */
		if (entry->irq == 0)
			continue;
		nvec = 1 << entry->msi_attrib.multiple;
		for (i = 0; i < nvec; i++)
			msm_pcie_destroy_irq(entry->irq + i, pcie_dev);
	}
}
5596
/* No-op ack callback: MSIs are acked in handle_msi_irq() at the
 * controller level, so the per-irq chip has nothing to do here.
 */
static void msm_pcie_msi_nop(struct irq_data *d)
{
}
5600
/* irq_chip for virtual MSI IRQs in the default (irq_domain) scheme;
 * mask/unmask go through the generic MSI helpers, ack is a no-op.
 */
static struct irq_chip pcie_msi_chip = {
	.name = "msm-pcie-msi",
	.irq_ack = msm_pcie_msi_nop,
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
5609
5610static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
5611{
5612 int irq, pos;
5613
5614 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5615
5616again:
5617 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5618
5619 if (pos >= PCIE_MSI_NR_IRQS)
5620 return -ENOSPC;
5621
5622 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5623
5624 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5625 goto again;
5626 else
5627 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5628
5629 irq = irq_create_mapping(dev->irq_domain, pos);
5630 if (!irq)
5631 return -EINVAL;
5632
5633 return irq;
5634}
5635
/*
 * arch_setup_msi_irq_default() - set up one MSI vector using the
 * software irq_domain scheme.
 *
 * Allocates a virtual IRQ, attaches the MSI descriptor, and programs
 * the endpoint's MSI message: address is the fixed MSM_PCIE_MSI_PHY
 * doorbell, data is the vector's offset within the domain.
 * Returns 0 on success or the negative allocation error.
 * Note: @nvec is unused here; only a single vector is programmed.
 */
static int arch_setup_msi_irq_default(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	irq = msm_pcie_create_irq(dev);

	PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);

	if (irq < 0)
		return irq;

	PCIE_DBG(dev, "irq %d allocated\n", irq);

	irq_set_msi_desc(irq, desc);

	/* write msi vector and data */
	msg.address_hi = 0;
	msg.address_lo = MSM_PCIE_MSI_PHY;
	msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
	write_msi_msg(irq, &msg);

	return 0;
}
5664
5665static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
5666{
5667 int irq, pos;
5668
5669 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5670
5671again:
5672 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5673
5674 if (pos >= PCIE_MSI_NR_IRQS)
5675 return -ENOSPC;
5676
5677 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5678
5679 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5680 goto again;
5681 else
5682 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5683
5684 if (pos >= MSM_PCIE_MAX_MSI) {
5685 PCIE_ERR(dev,
5686 "PCIe: RC%d: pos %d is not less than %d\n",
5687 dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
5688 return MSM_PCIE_ERROR;
5689 }
5690
5691 irq = dev->msi[pos].num;
5692 if (!irq) {
5693 PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
5694 dev->rc_idx);
5695 return -EINVAL;
5696 }
5697
5698 return irq;
5699}
5700
/*
 * arch_setup_msi_irq_qgic() - set up @nvec MSI vectors routed through
 * the QGIC doorbell.
 *
 * Allocates nvec consecutive QGIC vectors, configures each for
 * edge-rising trigger, then programs the endpoint's MSI message with
 * the QGIC doorbell address and the base SPI number of the first
 * vector.  Returns 0 on success or a negative allocation error.
 * NOTE(review): on a mid-loop failure, vectors already allocated in
 * earlier iterations are not released here -- confirm whether callers
 * clean up via arch_teardown_msi_irqs.
 */
static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq, index, firstirq = 0;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	for (index = 0; index < nvec; index++) {
		irq = msm_pcie_create_irq_qgic(dev);
		PCIE_DBG(dev, "irq %d is allocated\n", irq);

		if (irq < 0)
			return irq;

		if (index == 0)
			firstirq = irq;

		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
	}

	/* write msi vector and data */
	irq_set_msi_desc(firstirq, desc);
	msg.address_hi = 0;
	msg.address_lo = dev->msi_gicm_addr;
	msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
	write_msi_msg(firstirq, &msg);

	return 0;
}
5732
5733int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
5734{
5735 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5736
5737 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5738
5739 if (dev->msi_gicm_addr)
5740 return arch_setup_msi_irq_qgic(pdev, desc, 1);
5741 else
5742 return arch_setup_msi_irq_default(pdev, desc, 1);
5743}
5744
/*
 * msm_pcie_get_msi_multiple() - log2 of the requested vector count.
 *
 * Returns floor(log2(nvec)) by counting how many right shifts empty
 * the value, i.e. the "multiple message" exponent stored in
 * msi_attrib.multiple.  (For nvec == 0 the result is -1.)
 */
static int msm_pcie_get_msi_multiple(int nvec)
{
	int log2_count = 0;

	for (; nvec; nvec >>= 1)
		log2_count++;

	PCIE_GEN_DBG("log2 number of MSI multiple:%d\n",
		log2_count - 1);

	return log2_count - 1;
}
5758
/*
 * arch_setup_msi_irqs() - PCI-core override to set up multiple MSI
 * vectors for a device.
 *
 * Only plain MSI (not MSI-X) with at most 32 vectors is supported.
 * For each MSI descriptor the log2 vector count is recorded and the
 * vectors are allocated via the QGIC or default scheme.  On full
 * success the RC is flagged as using MSI.  Returns 0, a negative
 * error, or -ENOSPC when a scheme reports partial success.
 */
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);

	if (type != PCI_CAP_ID_MSI || nvec > 32)
		return -ENOSPC;

	PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);

	list_for_each_entry(entry, &dev->dev.msi_list, list) {
		entry->msi_attrib.multiple =
				msm_pcie_get_msi_multiple(nvec);

		if (pcie_dev->msi_gicm_addr)
			ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
		else
			ret = arch_setup_msi_irq_default(dev, entry, nvec);

		PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);

		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	pcie_dev->use_msi = true;

	return 0;
}
5793
5794static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
5795 irq_hw_number_t hwirq)
5796{
5797 irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
5798 irq_set_chip_data(irq, domain->host_data);
5799 return 0;
5800}
5801
/* irq_domain operations for the default MSI scheme; only .map is
 * needed since vectors are torn down via msm_pcie_destroy_irq().
 */
static const struct irq_domain_ops msm_pcie_msi_ops = {
	.map = msm_pcie_msi_map,
};
5805
5806int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
5807{
5808 int rc;
5809 int msi_start = 0;
5810 struct device *pdev = &dev->pdev->dev;
5811
5812 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5813
5814 if (dev->rc_idx)
5815 wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
5816 else
5817 wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
5818
5819 /* register handler for linkdown interrupt */
5820 if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
5821 rc = devm_request_irq(pdev,
5822 dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
5823 handle_linkdown_irq,
5824 IRQF_TRIGGER_RISING,
5825 dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
5826 dev);
5827 if (rc) {
5828 PCIE_ERR(dev,
5829 "PCIe: Unable to request linkdown interrupt:%d\n",
5830 dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
5831 return rc;
5832 }
5833 }
5834
5835 /* register handler for physical MSI interrupt line */
5836 if (dev->irq[MSM_PCIE_INT_MSI].num) {
5837 rc = devm_request_irq(pdev,
5838 dev->irq[MSM_PCIE_INT_MSI].num,
5839 handle_msi_irq,
5840 IRQF_TRIGGER_RISING,
5841 dev->irq[MSM_PCIE_INT_MSI].name,
5842 dev);
5843 if (rc) {
5844 PCIE_ERR(dev,
5845 "PCIe: RC%d: Unable to request MSI interrupt\n",
5846 dev->rc_idx);
5847 return rc;
5848 }
5849 }
5850
5851 /* register handler for AER interrupt */
5852 if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
5853 rc = devm_request_irq(pdev,
5854 dev->irq[MSM_PCIE_INT_PLS_ERR].num,
5855 handle_aer_irq,
5856 IRQF_TRIGGER_RISING,
5857 dev->irq[MSM_PCIE_INT_PLS_ERR].name,
5858 dev);
5859 if (rc) {
5860 PCIE_ERR(dev,
5861 "PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
5862 dev->rc_idx,
5863 dev->irq[MSM_PCIE_INT_PLS_ERR].num);
5864 return rc;
5865 }
5866 }
5867
5868 /* register handler for AER legacy interrupt */
5869 if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
5870 rc = devm_request_irq(pdev,
5871 dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
5872 handle_aer_irq,
5873 IRQF_TRIGGER_RISING,
5874 dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
5875 dev);
5876 if (rc) {
5877 PCIE_ERR(dev,
5878 "PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
5879 dev->rc_idx,
5880 dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
5881 return rc;
5882 }
5883 }
5884
5885 if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
5886 rc = devm_request_irq(pdev,
5887 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
5888 handle_global_irq,
5889 IRQF_TRIGGER_RISING,
5890 dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
5891 dev);
5892 if (rc) {
5893 PCIE_ERR(dev,
5894 "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
5895 dev->rc_idx,
5896 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
5897 return rc;
5898 }
5899 }
5900
5901 /* register handler for PCIE_WAKE_N interrupt line */
5902 if (dev->wake_n) {
5903 rc = devm_request_irq(pdev,
5904 dev->wake_n, handle_wake_irq,
5905 IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
5906 if (rc) {
5907 PCIE_ERR(dev,
5908 "PCIe: RC%d: Unable to request wake interrupt\n",
5909 dev->rc_idx);
5910 return rc;
5911 }
5912
5913 INIT_WORK(&dev->handle_wake_work, handle_wake_func);
5914
5915 rc = enable_irq_wake(dev->wake_n);
5916 if (rc) {
5917 PCIE_ERR(dev,
5918 "PCIe: RC%d: Unable to enable wake interrupt\n",
5919 dev->rc_idx);
5920 return rc;
5921 }
5922 }
5923
5924 /* Create a virtual domain of interrupts */
5925 if (!dev->msi_gicm_addr) {
5926 dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
5927 PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
5928
5929 if (!dev->irq_domain) {
5930 PCIE_ERR(dev,
5931 "PCIe: RC%d: Unable to initialize irq domain\n",
5932 dev->rc_idx);
5933
5934 if (dev->wake_n)
5935 disable_irq(dev->wake_n);
5936
5937 return PTR_ERR(dev->irq_domain);
5938 }
5939
5940 msi_start = irq_create_mapping(dev->irq_domain, 0);
5941 }
5942
5943 return 0;
5944}
5945
/*
 * msm_pcie_irq_deinit() - undo msm_pcie_irq_init().
 *
 * Drops the wakeup source and disables the WAKE_N line; the IRQ
 * handlers themselves are devm-managed and freed with the device.
 */
void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	wakeup_source_trash(&dev->ws);

	if (dev->wake_n)
		disable_irq(dev->wake_n);
}
5955
5956
5957static int msm_pcie_probe(struct platform_device *pdev)
5958{
5959 int ret = 0;
5960 int rc_idx = -1;
5961 int i, j;
5962
5963 PCIE_GEN_DBG("%s\n", __func__);
5964
5965 mutex_lock(&pcie_drv.drv_lock);
5966
5967 ret = of_property_read_u32((&pdev->dev)->of_node,
5968 "cell-index", &rc_idx);
5969 if (ret) {
5970 PCIE_GEN_DBG("Did not find RC index.\n");
5971 goto out;
5972 } else {
5973 if (rc_idx >= MAX_RC_NUM) {
5974 pr_err(
5975 "PCIe: Invalid RC Index %d (max supported = %d)\n",
5976 rc_idx, MAX_RC_NUM);
5977 goto out;
5978 }
5979 pcie_drv.rc_num++;
5980 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
5981 rc_idx);
5982 }
5983
5984 msm_pcie_dev[rc_idx].l0s_supported =
5985 of_property_read_bool((&pdev->dev)->of_node,
5986 "qcom,l0s-supported");
5987 PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
5988 msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
5989 msm_pcie_dev[rc_idx].l1_supported =
5990 of_property_read_bool((&pdev->dev)->of_node,
5991 "qcom,l1-supported");
5992 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
5993 msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
5994 msm_pcie_dev[rc_idx].l1ss_supported =
5995 of_property_read_bool((&pdev->dev)->of_node,
5996 "qcom,l1ss-supported");
5997 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
5998 msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
5999 msm_pcie_dev[rc_idx].common_clk_en =
6000 of_property_read_bool((&pdev->dev)->of_node,
6001 "qcom,common-clk-en");
6002 PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
6003 msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
6004 msm_pcie_dev[rc_idx].clk_power_manage_en =
6005 of_property_read_bool((&pdev->dev)->of_node,
6006 "qcom,clk-power-manage-en");
6007 PCIE_DBG(&msm_pcie_dev[rc_idx],
6008 "Clock power management is %s enabled.\n",
6009 msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
6010 msm_pcie_dev[rc_idx].aux_clk_sync =
6011 of_property_read_bool((&pdev->dev)->of_node,
6012 "qcom,aux-clk-sync");
6013 PCIE_DBG(&msm_pcie_dev[rc_idx],
6014 "AUX clock is %s synchronous to Core clock.\n",
6015 msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
6016
6017 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
6018 of_property_read_bool((&pdev->dev)->of_node,
6019 "qcom,use-19p2mhz-aux-clk");
6020 PCIE_DBG(&msm_pcie_dev[rc_idx],
6021 "AUX clock frequency is %s 19.2MHz.\n",
6022 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
6023
6024 msm_pcie_dev[rc_idx].smmu_exist =
6025 of_property_read_bool((&pdev->dev)->of_node,
6026 "qcom,smmu-exist");
6027 PCIE_DBG(&msm_pcie_dev[rc_idx],
6028 "SMMU does %s exist.\n",
6029 msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
6030
6031 msm_pcie_dev[rc_idx].smmu_sid_base = 0;
6032 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
6033 &msm_pcie_dev[rc_idx].smmu_sid_base);
6034 if (ret)
6035 PCIE_DBG(&msm_pcie_dev[rc_idx],
6036 "RC%d SMMU sid base not found\n",
6037 msm_pcie_dev[rc_idx].rc_idx);
6038 else
6039 PCIE_DBG(&msm_pcie_dev[rc_idx],
6040 "RC%d: qcom,smmu-sid-base: 0x%x.\n",
6041 msm_pcie_dev[rc_idx].rc_idx,
6042 msm_pcie_dev[rc_idx].smmu_sid_base);
6043
6044 msm_pcie_dev[rc_idx].ep_wakeirq =
6045 of_property_read_bool((&pdev->dev)->of_node,
6046 "qcom,ep-wakeirq");
6047 PCIE_DBG(&msm_pcie_dev[rc_idx],
6048 "PCIe: EP of RC%d does %s assert wake when it is up.\n",
6049 rc_idx, msm_pcie_dev[rc_idx].ep_wakeirq ? "" : "not");
6050
6051 msm_pcie_dev[rc_idx].phy_ver = 1;
6052 ret = of_property_read_u32((&pdev->dev)->of_node,
6053 "qcom,pcie-phy-ver",
6054 &msm_pcie_dev[rc_idx].phy_ver);
6055 if (ret)
6056 PCIE_DBG(&msm_pcie_dev[rc_idx],
6057 "RC%d: pcie-phy-ver does not exist.\n",
6058 msm_pcie_dev[rc_idx].rc_idx);
6059 else
6060 PCIE_DBG(&msm_pcie_dev[rc_idx],
6061 "RC%d: pcie-phy-ver: %d.\n",
6062 msm_pcie_dev[rc_idx].rc_idx,
6063 msm_pcie_dev[rc_idx].phy_ver);
6064
6065 msm_pcie_dev[rc_idx].n_fts = 0;
6066 ret = of_property_read_u32((&pdev->dev)->of_node,
6067 "qcom,n-fts",
6068 &msm_pcie_dev[rc_idx].n_fts);
6069
6070 if (ret)
6071 PCIE_DBG(&msm_pcie_dev[rc_idx],
6072 "n-fts does not exist. ret=%d\n", ret);
6073 else
6074 PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
6075 msm_pcie_dev[rc_idx].n_fts);
6076
6077 msm_pcie_dev[rc_idx].common_phy =
6078 of_property_read_bool((&pdev->dev)->of_node,
6079 "qcom,common-phy");
6080 PCIE_DBG(&msm_pcie_dev[rc_idx],
6081 "PCIe: RC%d: Common PHY does %s exist.\n",
6082 rc_idx, msm_pcie_dev[rc_idx].common_phy ? "" : "not");
6083
6084 msm_pcie_dev[rc_idx].ext_ref_clk =
6085 of_property_read_bool((&pdev->dev)->of_node,
6086 "qcom,ext-ref-clk");
6087 PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
6088 msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
6089
6090 msm_pcie_dev[rc_idx].ep_latency = 0;
6091 ret = of_property_read_u32((&pdev->dev)->of_node,
6092 "qcom,ep-latency",
6093 &msm_pcie_dev[rc_idx].ep_latency);
6094 if (ret)
6095 PCIE_DBG(&msm_pcie_dev[rc_idx],
6096 "RC%d: ep-latency does not exist.\n",
6097 rc_idx);
6098 else
6099 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
6100 rc_idx, msm_pcie_dev[rc_idx].ep_latency);
6101
6102 msm_pcie_dev[rc_idx].wr_halt_size = 0;
6103 ret = of_property_read_u32(pdev->dev.of_node,
6104 "qcom,wr-halt-size",
6105 &msm_pcie_dev[rc_idx].wr_halt_size);
6106 if (ret)
6107 PCIE_DBG(&msm_pcie_dev[rc_idx],
6108 "RC%d: wr-halt-size not specified in dt. Use default value.\n",
6109 rc_idx);
6110 else
6111 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
6112 rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
6113
6114 msm_pcie_dev[rc_idx].cpl_timeout = 0;
6115 ret = of_property_read_u32((&pdev->dev)->of_node,
6116 "qcom,cpl-timeout",
6117 &msm_pcie_dev[rc_idx].cpl_timeout);
6118 if (ret)
6119 PCIE_DBG(&msm_pcie_dev[rc_idx],
6120 "RC%d: Using default cpl-timeout.\n",
6121 rc_idx);
6122 else
6123 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
6124 rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
6125
6126 msm_pcie_dev[rc_idx].perst_delay_us_min =
6127 PERST_PROPAGATION_DELAY_US_MIN;
6128 ret = of_property_read_u32(pdev->dev.of_node,
6129 "qcom,perst-delay-us-min",
6130 &msm_pcie_dev[rc_idx].perst_delay_us_min);
6131 if (ret)
6132 PCIE_DBG(&msm_pcie_dev[rc_idx],
6133 "RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
6134 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
6135 else
6136 PCIE_DBG(&msm_pcie_dev[rc_idx],
6137 "RC%d: perst-delay-us-min: %dus.\n",
6138 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
6139
6140 msm_pcie_dev[rc_idx].perst_delay_us_max =
6141 PERST_PROPAGATION_DELAY_US_MAX;
6142 ret = of_property_read_u32(pdev->dev.of_node,
6143 "qcom,perst-delay-us-max",
6144 &msm_pcie_dev[rc_idx].perst_delay_us_max);
6145 if (ret)
6146 PCIE_DBG(&msm_pcie_dev[rc_idx],
6147 "RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
6148 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
6149 else
6150 PCIE_DBG(&msm_pcie_dev[rc_idx],
6151 "RC%d: perst-delay-us-max: %dus.\n",
6152 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
6153
6154 msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
6155 ret = of_property_read_u32(pdev->dev.of_node,
6156 "qcom,tlp-rd-size",
6157 &msm_pcie_dev[rc_idx].tlp_rd_size);
6158 if (ret)
6159 PCIE_DBG(&msm_pcie_dev[rc_idx],
6160 "RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
6161 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
6162 else
6163 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
6164 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
6165
6166 msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
6167 msm_pcie_dev[rc_idx].msi_gicm_base = 0;
6168 ret = of_property_read_u32((&pdev->dev)->of_node,
6169 "qcom,msi-gicm-addr",
6170 &msm_pcie_dev[rc_idx].msi_gicm_addr);
6171
6172 if (ret) {
6173 PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
6174 "msi-gicm-addr does not exist.\n");
6175 } else {
6176 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
6177 msm_pcie_dev[rc_idx].msi_gicm_addr);
6178
6179 ret = of_property_read_u32((&pdev->dev)->of_node,
6180 "qcom,msi-gicm-base",
6181 &msm_pcie_dev[rc_idx].msi_gicm_base);
6182
6183 if (ret) {
6184 PCIE_ERR(&msm_pcie_dev[rc_idx],
6185 "PCIe: RC%d: msi-gicm-base does not exist.\n",
6186 rc_idx);
6187 goto decrease_rc_num;
6188 } else {
6189 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
6190 msm_pcie_dev[rc_idx].msi_gicm_base);
6191 }
6192 }
6193
6194 msm_pcie_dev[rc_idx].scm_dev_id = 0;
6195 ret = of_property_read_u32((&pdev->dev)->of_node,
6196 "qcom,scm-dev-id",
6197 &msm_pcie_dev[rc_idx].scm_dev_id);
6198
6199 msm_pcie_dev[rc_idx].rc_idx = rc_idx;
6200 msm_pcie_dev[rc_idx].pdev = pdev;
6201 msm_pcie_dev[rc_idx].vreg_n = 0;
6202 msm_pcie_dev[rc_idx].gpio_n = 0;
6203 msm_pcie_dev[rc_idx].parf_deemph = 0;
6204 msm_pcie_dev[rc_idx].parf_swing = 0;
6205 msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
6206 msm_pcie_dev[rc_idx].user_suspend = false;
6207 msm_pcie_dev[rc_idx].disable_pc = false;
6208 msm_pcie_dev[rc_idx].saved_state = NULL;
6209 msm_pcie_dev[rc_idx].enumerated = false;
6210 msm_pcie_dev[rc_idx].num_active_ep = 0;
6211 msm_pcie_dev[rc_idx].num_ep = 0;
6212 msm_pcie_dev[rc_idx].pending_ep_reg = false;
6213 msm_pcie_dev[rc_idx].phy_len = 0;
6214 msm_pcie_dev[rc_idx].port_phy_len = 0;
6215 msm_pcie_dev[rc_idx].phy_sequence = NULL;
6216 msm_pcie_dev[rc_idx].port_phy_sequence = NULL;
6217 msm_pcie_dev[rc_idx].event_reg = NULL;
6218 msm_pcie_dev[rc_idx].linkdown_counter = 0;
6219 msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
6220 msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
6221 msm_pcie_dev[rc_idx].rc_corr_counter = 0;
6222 msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
6223 msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
6224 msm_pcie_dev[rc_idx].ep_corr_counter = 0;
6225 msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
6226 msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
6227 msm_pcie_dev[rc_idx].suspending = false;
6228 msm_pcie_dev[rc_idx].wake_counter = 0;
6229 msm_pcie_dev[rc_idx].aer_enable = true;
6230 msm_pcie_dev[rc_idx].power_on = false;
6231 msm_pcie_dev[rc_idx].current_short_bdf = 0;
6232 msm_pcie_dev[rc_idx].use_msi = false;
6233 msm_pcie_dev[rc_idx].use_pinctrl = false;
6234 msm_pcie_dev[rc_idx].linkdown_panic = false;
6235 msm_pcie_dev[rc_idx].bridge_found = false;
6236 memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
6237 sizeof(msm_pcie_vreg_info));
6238 memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
6239 sizeof(msm_pcie_gpio_info));
6240 memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
6241 sizeof(msm_pcie_clk_info[rc_idx]));
6242 memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
6243 sizeof(msm_pcie_pipe_clk_info[rc_idx]));
6244 memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
6245 sizeof(msm_pcie_res_info));
6246 memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
6247 sizeof(msm_pcie_irq_info));
6248 memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
6249 sizeof(msm_pcie_msi_info));
6250 memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
6251 sizeof(msm_pcie_reset_info[rc_idx]));
6252 memcpy(msm_pcie_dev[rc_idx].pipe_reset,
6253 msm_pcie_pipe_reset_info[rc_idx],
6254 sizeof(msm_pcie_pipe_reset_info[rc_idx]));
6255 msm_pcie_dev[rc_idx].shadow_en = true;
6256 for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
6257 msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
6258 for (i = 0; i < MAX_DEVICE_NUM; i++)
6259 for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
6260 msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
6261 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6262 msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
6263 msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
6264 msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
6265 msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
6266 msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
6267 msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = 0;
6268 msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
6269 msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
6270 msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
6271 msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
6272 }
6273
6274 ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
6275 msm_pcie_dev[rc_idx].pdev);
6276
6277 if (ret)
6278 goto decrease_rc_num;
6279
6280 msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
6281 if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
6282 PCIE_ERR(&msm_pcie_dev[rc_idx],
6283 "PCIe: RC%d failed to get pinctrl\n",
6284 rc_idx);
6285 else
6286 msm_pcie_dev[rc_idx].use_pinctrl = true;
6287
6288 if (msm_pcie_dev[rc_idx].use_pinctrl) {
6289 msm_pcie_dev[rc_idx].pins_default =
6290 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6291 "default");
6292 if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
6293 PCIE_ERR(&msm_pcie_dev[rc_idx],
6294 "PCIe: RC%d could not get pinctrl default state\n",
6295 rc_idx);
6296 msm_pcie_dev[rc_idx].pins_default = NULL;
6297 }
6298
6299 msm_pcie_dev[rc_idx].pins_sleep =
6300 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6301 "sleep");
6302 if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
6303 PCIE_ERR(&msm_pcie_dev[rc_idx],
6304 "PCIe: RC%d could not get pinctrl sleep state\n",
6305 rc_idx);
6306 msm_pcie_dev[rc_idx].pins_sleep = NULL;
6307 }
6308 }
6309
6310 ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
6311 if (ret) {
6312 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6313 goto decrease_rc_num;
6314 }
6315
6316 ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
6317 if (ret) {
6318 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6319 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6320 goto decrease_rc_num;
6321 }
6322
6323 msm_pcie_dev[rc_idx].drv_ready = true;
6324
6325 if (msm_pcie_dev[rc_idx].ep_wakeirq) {
6326 PCIE_DBG(&msm_pcie_dev[rc_idx],
6327 "PCIe: RC%d will be enumerated upon WAKE signal from Endpoint.\n",
6328 rc_idx);
6329 mutex_unlock(&pcie_drv.drv_lock);
6330 return 0;
6331 }
6332
6333 ret = msm_pcie_enumerate(rc_idx);
6334
6335 if (ret)
6336 PCIE_ERR(&msm_pcie_dev[rc_idx],
6337 "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
6338 rc_idx);
6339 else
6340 PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n",
6341 rc_idx);
6342
6343 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n",
6344 dev_name(&(pdev->dev)));
6345
6346 mutex_unlock(&pcie_drv.drv_lock);
6347 return 0;
6348
6349decrease_rc_num:
6350 pcie_drv.rc_num--;
6351out:
6352 if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
6353 pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
6354 rc_idx);
6355 else
6356 PCIE_ERR(&msm_pcie_dev[rc_idx],
6357 "PCIe: Driver probe failed for RC%d:%d\n",
6358 rc_idx, ret);
6359
6360 mutex_unlock(&pcie_drv.drv_lock);
6361
6362 return ret;
6363}
6364
6365static int msm_pcie_remove(struct platform_device *pdev)
6366{
6367 int ret = 0;
6368 int rc_idx;
6369
6370 PCIE_GEN_DBG("PCIe:%s.\n", __func__);
6371
6372 mutex_lock(&pcie_drv.drv_lock);
6373
6374 ret = of_property_read_u32((&pdev->dev)->of_node,
6375 "cell-index", &rc_idx);
6376 if (ret) {
6377 pr_err("%s: Did not find RC index.\n", __func__);
6378 goto out;
6379 } else {
6380 pcie_drv.rc_num--;
6381 PCIE_GEN_DBG("%s: RC index is 0x%x.", __func__, rc_idx);
6382 }
6383
6384 msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
6385 msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
6386 msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
6387 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6388 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6389
6390out:
6391 mutex_unlock(&pcie_drv.drv_lock);
6392
6393 return ret;
6394}
6395
/* Device-tree match table: binds this driver to "qcom,pci-msm" nodes. */
static const struct of_device_id msm_pcie_match[] = {
	{	.compatible = "qcom,pci-msm",
	},
	{}
};
6401
/* Platform driver definition; probe/remove are the entry points above. */
static struct platform_driver msm_pcie_driver = {
	.probe	= msm_pcie_probe,
	.remove	= msm_pcie_remove,
	.driver	= {
		.name		= "pci-msm",
		.owner		= THIS_MODULE,
		.of_match_table	= msm_pcie_match,
	},
};
6411
6412int __init pcie_init(void)
6413{
6414 int ret = 0, i;
6415 char rc_name[MAX_RC_NAME_LEN];
6416
6417 pr_alert("pcie:%s.\n", __func__);
6418
6419 pcie_drv.rc_num = 0;
6420 mutex_init(&pcie_drv.drv_lock);
6421 mutex_init(&com_phy_lock);
6422
6423 for (i = 0; i < MAX_RC_NUM; i++) {
6424 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
6425 msm_pcie_dev[i].ipc_log =
6426 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6427 if (msm_pcie_dev[i].ipc_log == NULL)
6428 pr_err("%s: unable to create IPC log context for %s\n",
6429 __func__, rc_name);
6430 else
6431 PCIE_DBG(&msm_pcie_dev[i],
6432 "PCIe IPC logging is enable for RC%d\n",
6433 i);
6434 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
6435 msm_pcie_dev[i].ipc_log_long =
6436 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6437 if (msm_pcie_dev[i].ipc_log_long == NULL)
6438 pr_err("%s: unable to create IPC log context for %s\n",
6439 __func__, rc_name);
6440 else
6441 PCIE_DBG(&msm_pcie_dev[i],
6442 "PCIe IPC logging %s is enable for RC%d\n",
6443 rc_name, i);
6444 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
6445 msm_pcie_dev[i].ipc_log_dump =
6446 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6447 if (msm_pcie_dev[i].ipc_log_dump == NULL)
6448 pr_err("%s: unable to create IPC log context for %s\n",
6449 __func__, rc_name);
6450 else
6451 PCIE_DBG(&msm_pcie_dev[i],
6452 "PCIe IPC logging %s is enable for RC%d\n",
6453 rc_name, i);
6454 spin_lock_init(&msm_pcie_dev[i].cfg_lock);
6455 msm_pcie_dev[i].cfg_access = true;
6456 mutex_init(&msm_pcie_dev[i].enumerate_lock);
6457 mutex_init(&msm_pcie_dev[i].setup_lock);
6458 mutex_init(&msm_pcie_dev[i].recovery_lock);
6459 spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
6460 spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
6461 spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
6462 spin_lock_init(&msm_pcie_dev[i].aer_lock);
6463 msm_pcie_dev[i].drv_ready = false;
6464 }
6465 for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
6466 msm_pcie_dev_tbl[i].bdf = 0;
6467 msm_pcie_dev_tbl[i].dev = NULL;
6468 msm_pcie_dev_tbl[i].short_bdf = 0;
6469 msm_pcie_dev_tbl[i].sid = 0;
6470 msm_pcie_dev_tbl[i].domain = -1;
6471 msm_pcie_dev_tbl[i].conf_base = 0;
6472 msm_pcie_dev_tbl[i].phy_address = 0;
6473 msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
6474 msm_pcie_dev_tbl[i].event_reg = NULL;
6475 msm_pcie_dev_tbl[i].registered = true;
6476 }
6477
6478 msm_pcie_debugfs_init();
6479
6480 ret = platform_driver_register(&msm_pcie_driver);
6481
6482 return ret;
6483}
6484
/*
 * Module exit: unregister the platform driver first (tears down any
 * bound RCs), then remove the debugfs entries created in pcie_init().
 */
static void __exit pcie_exit(void)
{
	PCIE_GEN_DBG("pcie:%s.\n", __func__);

	platform_driver_unregister(&msm_pcie_driver);

	msm_pcie_debugfs_exit();
}
6493
/* Initialize at subsys level so the RC is up before ordinary module initcalls. */
subsys_initcall_sync(pcie_init);
module_exit(pcie_exit);
6496
6497
6498/* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
6499static void msm_pcie_fixup_early(struct pci_dev *dev)
6500{
6501 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6502
6503 PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
6504 if (dev->hdr_type == 1)
6505 dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
6506}
6507DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6508 msm_pcie_fixup_early);
6509
/*
 * Suspend the PCIe link: save the EP config state, block config access,
 * send PME_Turn_Off, wait for L23_Ready, then drop clocks/regulators
 * and park the pins in the sleep state.
 */
static int msm_pcie_pm_suspend(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret = 0;
	u32 val = 0;
	int ret_l23;
	unsigned long irqsave_flags;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/*
	 * Mark the port as suspending under aer_lock.
	 * NOTE(review): presumably consulted by the AER interrupt path
	 * (same lock) - confirm against the ISR.
	 */
	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
	pcie_dev->suspending = true;
	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);

	/* Nothing to do if power has already been removed from this RC. */
	if (!pcie_dev->power_on) {
		PCIE_DBG(pcie_dev,
			"PCIe: power of RC%d has been turned off.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	/*
	 * Save the EP config space only when the caller did not pass
	 * MSM_PCIE_CONFIG_NO_CFG_RESTORE and the link is confirmed up;
	 * the stored state is handed back to the PCI core on resume.
	 */
	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
			pcie_dev->conf)) {
		ret = pci_save_state(dev);
		pcie_dev->saved_state = pci_store_saved_state(dev);
	}
	if (ret) {
		PCIE_ERR(pcie_dev, "PCIe: fail to save state of RC%d:%d.\n",
			pcie_dev->rc_idx, ret);
		pcie_dev->suspending = false;
		return ret;
	}

	/* Block further config-space access while the link is going down. */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = false;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* Setting BIT(4) of ELBI_SYS_CTRL sends out the PME_TURNOFF_MSG. */
	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
				BIT(4));

	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
		pcie_dev->rc_idx);

	/* Poll PM_STTS BIT(5) every 10ms, up to 100ms, for L23 entry. */
	ret_l23 = readl_poll_timeout((pcie_dev->parf
		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);

	/* check L23_Ready */
	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
		pcie_dev->rc_idx,
		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
	if (!ret_l23)
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
			pcie_dev->rc_idx);

	/* Drop pipe clock, clocks and regulators. */
	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	/* Park the pins in the sleep state if pinctrl provided one. */
	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_sleep);

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6582
6583static void msm_pcie_fixup_suspend(struct pci_dev *dev)
6584{
6585 int ret;
6586 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6587
6588 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6589
6590 if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
6591 return;
6592
6593 spin_lock_irqsave(&pcie_dev->cfg_lock,
6594 pcie_dev->irqsave_flags);
6595 if (pcie_dev->disable_pc) {
6596 PCIE_DBG(pcie_dev,
6597 "RC%d: Skip suspend because of user request\n",
6598 pcie_dev->rc_idx);
6599 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
6600 pcie_dev->irqsave_flags);
6601 return;
6602 }
6603 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
6604 pcie_dev->irqsave_flags);
6605
6606 mutex_lock(&pcie_dev->recovery_lock);
6607
6608 ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
6609 if (ret)
6610 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
6611 pcie_dev->rc_idx, ret);
6612
6613 mutex_unlock(&pcie_dev->recovery_lock);
6614}
6615DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6616 msm_pcie_fixup_suspend);
6617
/*
 * Resume the PCIe link: restore the default pin state, re-enable config
 * access, power the port back up, then restore the PCI config state
 * saved at suspend and recover shadowed config if a bridge is present.
 */
static int msm_pcie_pm_resume(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* Restore the default pin state before powering anything up. */
	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_default);

	/* Re-allow config-space access for the upcoming restore. */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = true;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* Re-enable pipe clock, clocks and regulators (inverse of suspend). */
	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
	if (ret) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d fail to enable PCIe link in resume.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	pcie_dev->suspending = false;
	PCIE_DBG(pcie_dev,
		"dev->bus->number = %d dev->bus->primary = %d\n",
		 dev->bus->number, dev->bus->primary);

	/* Hand the config state saved at suspend back to the PCI core. */
	if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCI framework restore state\n",
			pcie_dev->rc_idx);

		pci_load_and_free_saved_state(dev,
			&pcie_dev->saved_state);
		pci_restore_state(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCI framework restore state\n",
			pcie_dev->rc_idx);
	}

	/* With a bridge below this RC, also recover the shadowed config. */
	if (pcie_dev->bridge_found) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCIe recover config\n",
			pcie_dev->rc_idx);

		msm_pcie_recover_config(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCIe recover config\n",
			pcie_dev->rc_idx);
	}

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6680
6681void msm_pcie_fixup_resume(struct pci_dev *dev)
6682{
6683 int ret;
6684 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6685
6686 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6687
6688 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6689 pcie_dev->user_suspend)
6690 return;
6691
6692 mutex_lock(&pcie_dev->recovery_lock);
6693 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6694 if (ret)
6695 PCIE_ERR(pcie_dev,
6696 "PCIe: RC%d got failure in fixup resume:%d.\n",
6697 pcie_dev->rc_idx, ret);
6698
6699 mutex_unlock(&pcie_dev->recovery_lock);
6700}
6701DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6702 msm_pcie_fixup_resume);
6703
6704void msm_pcie_fixup_resume_early(struct pci_dev *dev)
6705{
6706 int ret;
6707 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6708
6709 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6710
6711 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6712 pcie_dev->user_suspend)
6713 return;
6714
6715 mutex_lock(&pcie_dev->recovery_lock);
6716 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6717 if (ret)
6718 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
6719 pcie_dev->rc_idx, ret);
6720
6721 mutex_unlock(&pcie_dev->recovery_lock);
6722}
6723DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6724 msm_pcie_fixup_resume_early);
6725
/*
 * msm_pcie_pm_control - power-management entry point exported to EP
 * client drivers.
 * @pm_opt:  requested operation (suspend/resume/disable-PC/enable-PC).
 * @busnr:   bus number of the endpoint; 0 skips the bus-number check.
 * @user:    the client's struct pci_dev, used to locate the owning RC.
 * @data:    opaque cookie forwarded to the suspend/resume handlers.
 * @options: MSM_PCIE_CONFIG_* flags (e.g. NO_CFG_RESTORE).
 *
 * Returns 0 on success, -ENODEV on bad input or unsupported operation,
 * -EPROBE_DEFER before probe completes, or MSM_PCIE_ERROR.
 */
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
			void *data, u32 options)
{
	int i, ret = 0;
	struct pci_dev *dev;
	u32 rc_idx = 0;
	struct msm_pcie_dev_t *pcie_dev;

	PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
		pm_opt, busnr, options);


	if (!user) {
		pr_err("PCIe: endpoint device is NULL\n");
		ret = -ENODEV;
		goto out;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);

	if (pcie_dev) {
		rc_idx = pcie_dev->rc_idx;
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
			rc_idx, pm_opt, busnr, options);
	} else {
		pr_err(
			"PCIe: did not find RC for pci endpoint device.\n"
			);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Cross-check the caller's bus number against the BDF recorded
	 * for this EP in the device table (high byte of bdf is the bus
	 * number). busnr == 0 skips the check entirely.
	 */
	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (!busnr)
			break;
		if (user == pcie_dev->pcidev_table[i].dev) {
			if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
				break;

			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: bus number %d does not match with the expected value %d\n",
				pcie_dev->rc_idx, busnr,
				pcie_dev->pcidev_table[i].bdf >> 24);
			ret = MSM_PCIE_ERROR;
			goto out;
		}
	}

	/* Loop ran to completion: the EP was never found in the table. */
	if (i == MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: endpoint device was not found in device table",
			pcie_dev->rc_idx);
		ret = MSM_PCIE_ERROR;
		goto out;
	}

	dev = msm_pcie_dev[rc_idx].dev;

	/* Clients calling before probe completed should retry later. */
	if (!msm_pcie_dev[rc_idx].drv_ready) {
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"RC%d has not been successfully probed yet\n",
			rc_idx);
		return -EPROBE_DEFER;
	}

	switch (pm_opt) {
	case MSM_PCIE_SUSPEND:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to suspend the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);

		/* Cannot suspend a port that is already powered down. */
		if (!msm_pcie_dev[rc_idx].power_on) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);
			break;
		}

		/* Refuse while EP registrations are still outstanding. */
		if (msm_pcie_dev[rc_idx].pending_ep_reg) {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: request to suspend the link is rejected\n",
				rc_idx);
			break;
		}

		/* Keep the link up while any other EP is still active. */
		if (pcie_dev->num_active_ep) {
			PCIE_DBG(pcie_dev,
				"RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
				pcie_dev->rc_idx, pcie_dev->num_active_ep);
			return ret;
		}

		/* Record a user-initiated suspend; cleared on failure. */
		msm_pcie_dev[rc_idx].user_suspend = true;

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);

		ret = msm_pcie_pm_suspend(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to suspend the link.\n",
				rc_idx);
			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
		break;
	case MSM_PCIE_RESUME:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to resume the link\n", rc_idx);
		/* Resume only makes sense from the fully-disabled state. */
		if (msm_pcie_dev[rc_idx].link_status !=
					MSM_PCIE_LINK_DISABLED) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status,
				msm_pcie_dev[rc_idx].num_active_ep);
			break;
		}

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
		ret = msm_pcie_pm_resume(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to resume the link.\n",
				rc_idx);
		} else {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user succeeded to resume the link.\n",
				rc_idx);

			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);

		break;
	case MSM_PCIE_DISABLE_PC:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to keep the link always alive.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		/* Too late to disable power collapse once suspend started. */
		if (msm_pcie_dev[rc_idx].suspending) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d Link has been suspended before request\n",
				rc_idx);
			ret = MSM_PCIE_ERROR;
		} else {
			msm_pcie_dev[rc_idx].disable_pc = true;
		}
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	case MSM_PCIE_ENABLE_PC:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d cancels the request of alive link.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		msm_pcie_dev[rc_idx].disable_pc = false;
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	default:
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"PCIe: RC%d: unsupported pm operation:%d.\n",
			rc_idx, pm_opt);
		ret = -ENODEV;
		goto out;
	}

out:
	return ret;
}
EXPORT_SYMBOL(msm_pcie_pm_control);
6904
6905int msm_pcie_register_event(struct msm_pcie_register_event *reg)
6906{
6907 int i, ret = 0;
6908 struct msm_pcie_dev_t *pcie_dev;
6909
6910 if (!reg) {
6911 pr_err("PCIe: Event registration is NULL\n");
6912 return -ENODEV;
6913 }
6914
6915 if (!reg->user) {
6916 pr_err("PCIe: User of event registration is NULL\n");
6917 return -ENODEV;
6918 }
6919
6920 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6921
6922 if (!pcie_dev) {
6923 PCIE_ERR(pcie_dev, "%s",
6924 "PCIe: did not find RC for pci endpoint device.\n");
6925 return -ENODEV;
6926 }
6927
6928 if (pcie_dev->num_ep > 1) {
6929 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6930 if (reg->user ==
6931 pcie_dev->pcidev_table[i].dev) {
6932 pcie_dev->event_reg =
6933 pcie_dev->pcidev_table[i].event_reg;
6934
6935 if (!pcie_dev->event_reg) {
6936 pcie_dev->pcidev_table[i].registered =
6937 true;
6938
6939 pcie_dev->num_active_ep++;
6940 PCIE_DBG(pcie_dev,
6941 "PCIe: RC%d: number of active EP(s): %d.\n",
6942 pcie_dev->rc_idx,
6943 pcie_dev->num_active_ep);
6944 }
6945
6946 pcie_dev->event_reg = reg;
6947 pcie_dev->pcidev_table[i].event_reg = reg;
6948 PCIE_DBG(pcie_dev,
6949 "Event 0x%x is registered for RC %d\n",
6950 reg->events,
6951 pcie_dev->rc_idx);
6952
6953 break;
6954 }
6955 }
6956
6957 if (pcie_dev->pending_ep_reg) {
6958 for (i = 0; i < MAX_DEVICE_NUM; i++)
6959 if (!pcie_dev->pcidev_table[i].registered)
6960 break;
6961
6962 if (i == MAX_DEVICE_NUM)
6963 pcie_dev->pending_ep_reg = false;
6964 }
6965 } else {
6966 pcie_dev->event_reg = reg;
6967 PCIE_DBG(pcie_dev,
6968 "Event 0x%x is registered for RC %d\n", reg->events,
6969 pcie_dev->rc_idx);
6970 }
6971
6972 return ret;
6973}
6974EXPORT_SYMBOL(msm_pcie_register_event);
6975
6976int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
6977{
6978 int i, ret = 0;
6979 struct msm_pcie_dev_t *pcie_dev;
6980
6981 if (!reg) {
6982 pr_err("PCIe: Event deregistration is NULL\n");
6983 return -ENODEV;
6984 }
6985
6986 if (!reg->user) {
6987 pr_err("PCIe: User of event deregistration is NULL\n");
6988 return -ENODEV;
6989 }
6990
6991 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6992
6993 if (!pcie_dev) {
6994 PCIE_ERR(pcie_dev, "%s",
6995 "PCIe: did not find RC for pci endpoint device.\n");
6996 return -ENODEV;
6997 }
6998
6999 if (pcie_dev->num_ep > 1) {
7000 for (i = 0; i < MAX_DEVICE_NUM; i++) {
7001 if (reg->user == pcie_dev->pcidev_table[i].dev) {
7002 if (pcie_dev->pcidev_table[i].event_reg) {
7003 pcie_dev->num_active_ep--;
7004 PCIE_DBG(pcie_dev,
7005 "PCIe: RC%d: number of active EP(s) left: %d.\n",
7006 pcie_dev->rc_idx,
7007 pcie_dev->num_active_ep);
7008 }
7009
7010 pcie_dev->event_reg = NULL;
7011 pcie_dev->pcidev_table[i].event_reg = NULL;
7012 PCIE_DBG(pcie_dev,
7013 "Event is deregistered for RC %d\n",
7014 pcie_dev->rc_idx);
7015
7016 break;
7017 }
7018 }
7019 } else {
7020 pcie_dev->event_reg = NULL;
7021 PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
7022 pcie_dev->rc_idx);
7023 }
7024
7025 return ret;
7026}
7027EXPORT_SYMBOL(msm_pcie_deregister_event);
7028
7029int msm_pcie_recover_config(struct pci_dev *dev)
7030{
7031 int ret = 0;
7032 struct msm_pcie_dev_t *pcie_dev;
7033
7034 if (dev) {
7035 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
7036 PCIE_DBG(pcie_dev,
7037 "Recovery for the link of RC%d\n", pcie_dev->rc_idx);
7038 } else {
7039 pr_err("PCIe: the input pci dev is NULL.\n");
7040 return -ENODEV;
7041 }
7042
7043 if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
7044 PCIE_DBG(pcie_dev,
7045 "Recover config space of RC%d and its EP\n",
7046 pcie_dev->rc_idx);
7047 pcie_dev->shadow_en = false;
7048 PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
7049 msm_pcie_cfg_recover(pcie_dev, true);
7050 PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
7051 msm_pcie_cfg_recover(pcie_dev, false);
7052 PCIE_DBG(pcie_dev,
7053 "Refreshing the saved config space in PCI framework for RC%d and its EP\n",
7054 pcie_dev->rc_idx);
7055 pci_save_state(pcie_dev->dev);
7056 pci_save_state(dev);
7057 pcie_dev->shadow_en = true;
7058 PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
7059 pcie_dev->rc_idx);
7060 } else {
7061 PCIE_ERR(pcie_dev,
7062 "PCIe: the link of RC%d is not up yet; can't recover config space.\n",
7063 pcie_dev->rc_idx);
7064 ret = -ENODEV;
7065 }
7066
7067 return ret;
7068}
7069EXPORT_SYMBOL(msm_pcie_recover_config);
7070
7071int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
7072{
7073 int ret = 0;
7074 struct msm_pcie_dev_t *pcie_dev;
7075
7076 if (dev) {
7077 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
7078 PCIE_DBG(pcie_dev,
7079 "User requests to %s shadow\n",
7080 enable ? "enable" : "disable");
7081 } else {
7082 pr_err("PCIe: the input pci dev is NULL.\n");
7083 return -ENODEV;
7084 }
7085
7086 PCIE_DBG(pcie_dev,
7087 "The shadowing of RC%d is %s enabled currently.\n",
7088 pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not");
7089
7090 pcie_dev->shadow_en = enable;
7091
7092 PCIE_DBG(pcie_dev,
7093 "Shadowing of RC%d is turned %s upon user's request.\n",
7094 pcie_dev->rc_idx, enable ? "on" : "off");
7095
7096 return ret;
7097}
7098EXPORT_SYMBOL(msm_pcie_shadow_control);