/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * MSM PCIe controller driver.
15 */
16
17#include <linux/module.h>
18#include <linux/bitops.h>
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
22#include <linux/gpio.h>
23#include <linux/iopoll.h>
24#include <linux/kernel.h>
25#include <linux/of_pci.h>
26#include <linux/pci.h>
27#include <linux/platform_device.h>
28#include <linux/regulator/consumer.h>
29#include <linux/regulator/rpm-smd-regulator.h>
30#include <linux/slab.h>
31#include <linux/types.h>
32#include <linux/of_gpio.h>
33#include <linux/clk/msm-clk.h>
34#include <linux/reset.h>
35#include <linux/msm-bus.h>
36#include <linux/msm-bus-board.h>
37#include <linux/debugfs.h>
38#include <linux/uaccess.h>
39#include <linux/io.h>
40#include <linux/msi.h>
41#include <linux/interrupt.h>
42#include <linux/irq.h>
43#include <linux/irqdomain.h>
44#include <linux/pm_wakeup.h>
45#include <linux/compiler.h>
46#include <soc/qcom/scm.h>
47#include <linux/ipc_logging.h>
48#include <linux/msm_pcie.h>
49
50#ifdef CONFIG_ARCH_MDMCALIFORNIUM
51#define PCIE_VENDOR_ID_RCP 0x17cb
52#define PCIE_DEVICE_ID_RCP 0x0302
53
54#define PCIE20_L1SUB_CONTROL1 0x158
55#define PCIE20_PARF_DBI_BASE_ADDR 0x350
56#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
57
58#define TX_BASE 0x200
59#define RX_BASE 0x400
60#define PCS_BASE 0x800
61#define PCS_MISC_BASE 0x600
62
63#elif defined(CONFIG_ARCH_MSM8998)
64#define PCIE_VENDOR_ID_RCP 0x17cb
65#define PCIE_DEVICE_ID_RCP 0x0105
66
67#define PCIE20_L1SUB_CONTROL1 0x1E4
68#define PCIE20_PARF_DBI_BASE_ADDR 0x350
69#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
70
71#define TX_BASE 0
72#define RX_BASE 0
73#define PCS_BASE 0x800
74#define PCS_MISC_BASE 0
75
76#else
77#define PCIE_VENDOR_ID_RCP 0x17cb
78#define PCIE_DEVICE_ID_RCP 0x0104
79
80#define PCIE20_L1SUB_CONTROL1 0x158
81#define PCIE20_PARF_DBI_BASE_ADDR 0x168
82#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
83
84#define TX_BASE 0x1000
85#define RX_BASE 0x1200
86#define PCS_BASE 0x1400
87#define PCS_MISC_BASE 0
88#endif
89
90#define TX(n, m) (TX_BASE + n * m * 0x1000)
91#define RX(n, m) (RX_BASE + n * m * 0x1000)
92#define PCS_PORT(n, m) (PCS_BASE + n * m * 0x1000)
93#define PCS_MISC_PORT(n, m) (PCS_MISC_BASE + n * m * 0x1000)
94
95#define QSERDES_COM_BG_TIMER 0x00C
96#define QSERDES_COM_SSC_EN_CENTER 0x010
97#define QSERDES_COM_SSC_ADJ_PER1 0x014
98#define QSERDES_COM_SSC_ADJ_PER2 0x018
99#define QSERDES_COM_SSC_PER1 0x01C
100#define QSERDES_COM_SSC_PER2 0x020
101#define QSERDES_COM_SSC_STEP_SIZE1 0x024
102#define QSERDES_COM_SSC_STEP_SIZE2 0x028
103#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
104#define QSERDES_COM_CLK_ENABLE1 0x038
105#define QSERDES_COM_SYS_CLK_CTRL 0x03C
106#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
107#define QSERDES_COM_PLL_IVCO 0x048
108#define QSERDES_COM_LOCK_CMP1_MODE0 0x04C
109#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
110#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
111#define QSERDES_COM_BG_TRIM 0x070
112#define QSERDES_COM_CLK_EP_DIV 0x074
113#define QSERDES_COM_CP_CTRL_MODE0 0x078
114#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
115#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
116#define QSERDES_COM_SYSCLK_EN_SEL 0x0AC
117#define QSERDES_COM_RESETSM_CNTRL 0x0B4
118#define QSERDES_COM_RESTRIM_CTRL 0x0BC
119#define QSERDES_COM_RESCODE_DIV_NUM 0x0C4
120#define QSERDES_COM_LOCK_CMP_EN 0x0C8
121#define QSERDES_COM_DEC_START_MODE0 0x0D0
122#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0DC
123#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0E0
124#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0E4
125#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
126#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10C
127#define QSERDES_COM_VCO_TUNE_CTRL 0x124
128#define QSERDES_COM_VCO_TUNE_MAP 0x128
129#define QSERDES_COM_VCO_TUNE1_MODE0 0x12C
130#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
131#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
132#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
133#define QSERDES_COM_BG_CTRL 0x170
134#define QSERDES_COM_CLK_SELECT 0x174
135#define QSERDES_COM_HSCLK_SEL 0x178
136#define QSERDES_COM_CORECLK_DIV 0x184
137#define QSERDES_COM_CORE_CLK_EN 0x18C
138#define QSERDES_COM_C_READY_STATUS 0x190
139#define QSERDES_COM_CMN_CONFIG 0x194
140#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19C
141#define QSERDES_COM_DEBUG_BUS0 0x1A0
142#define QSERDES_COM_DEBUG_BUS1 0x1A4
143#define QSERDES_COM_DEBUG_BUS2 0x1A8
144#define QSERDES_COM_DEBUG_BUS3 0x1AC
145#define QSERDES_COM_DEBUG_BUS_SEL 0x1B0
146
147#define QSERDES_TX_N_RES_CODE_LANE_OFFSET(n, m) (TX(n, m) + 0x4C)
148#define QSERDES_TX_N_DEBUG_BUS_SEL(n, m) (TX(n, m) + 0x64)
149#define QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(n, m) (TX(n, m) + 0x68)
150#define QSERDES_TX_N_LANE_MODE(n, m) (TX(n, m) + 0x94)
151#define QSERDES_TX_N_RCV_DETECT_LVL_2(n, m) (TX(n, m) + 0xAC)
152
153#define QSERDES_RX_N_UCDR_SO_GAIN_HALF(n, m) (RX(n, m) + 0x010)
154#define QSERDES_RX_N_UCDR_SO_GAIN(n, m) (RX(n, m) + 0x01C)
155#define QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(n, m) (RX(n, m) + 0x048)
156#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(n, m) (RX(n, m) + 0x0D8)
157#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(n, m) (RX(n, m) + 0x0DC)
158#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(n, m) (RX(n, m) + 0x0E0)
159#define QSERDES_RX_N_SIGDET_ENABLES(n, m) (RX(n, m) + 0x110)
160#define QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(n, m) (RX(n, m) + 0x11C)
161#define QSERDES_RX_N_SIGDET_LVL(n, m) (RX(n, m) + 0x118)
162#define QSERDES_RX_N_RX_BAND(n, m) (RX(n, m) + 0x120)
163
164#define PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x00)
165#define PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x04)
166#define PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x08)
167#define PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x0C)
168#define PCIE_MISC_N_DEBUG_BUS_0_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x14)
169#define PCIE_MISC_N_DEBUG_BUS_1_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x18)
170#define PCIE_MISC_N_DEBUG_BUS_2_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x1C)
171#define PCIE_MISC_N_DEBUG_BUS_3_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x20)
172
173#define PCIE_N_SW_RESET(n, m) (PCS_PORT(n, m) + 0x00)
174#define PCIE_N_POWER_DOWN_CONTROL(n, m) (PCS_PORT(n, m) + 0x04)
175#define PCIE_N_START_CONTROL(n, m) (PCS_PORT(n, m) + 0x08)
176#define PCIE_N_TXDEEMPH_M6DB_V0(n, m) (PCS_PORT(n, m) + 0x24)
177#define PCIE_N_TXDEEMPH_M3P5DB_V0(n, m) (PCS_PORT(n, m) + 0x28)
178#define PCIE_N_ENDPOINT_REFCLK_DRIVE(n, m) (PCS_PORT(n, m) + 0x54)
179#define PCIE_N_RX_IDLE_DTCT_CNTRL(n, m) (PCS_PORT(n, m) + 0x58)
180#define PCIE_N_POWER_STATE_CONFIG1(n, m) (PCS_PORT(n, m) + 0x60)
181#define PCIE_N_POWER_STATE_CONFIG4(n, m) (PCS_PORT(n, m) + 0x6C)
182#define PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA0)
183#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA4)
184#define PCIE_N_PLL_LOCK_CHK_DLY_TIME(n, m) (PCS_PORT(n, m) + 0xA8)
185#define PCIE_N_TEST_CONTROL4(n, m) (PCS_PORT(n, m) + 0x11C)
186#define PCIE_N_TEST_CONTROL5(n, m) (PCS_PORT(n, m) + 0x120)
187#define PCIE_N_TEST_CONTROL6(n, m) (PCS_PORT(n, m) + 0x124)
188#define PCIE_N_TEST_CONTROL7(n, m) (PCS_PORT(n, m) + 0x128)
189#define PCIE_N_PCS_STATUS(n, m) (PCS_PORT(n, m) + 0x174)
190#define PCIE_N_DEBUG_BUS_0_STATUS(n, m) (PCS_PORT(n, m) + 0x198)
191#define PCIE_N_DEBUG_BUS_1_STATUS(n, m) (PCS_PORT(n, m) + 0x19C)
192#define PCIE_N_DEBUG_BUS_2_STATUS(n, m) (PCS_PORT(n, m) + 0x1A0)
193#define PCIE_N_DEBUG_BUS_3_STATUS(n, m) (PCS_PORT(n, m) + 0x1A4)
194#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1A8)
195#define PCIE_N_OSC_DTCT_ACTIONS(n, m) (PCS_PORT(n, m) + 0x1AC)
196#define PCIE_N_SIGDET_CNTRL(n, m) (PCS_PORT(n, m) + 0x1B0)
197#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(n, m) (PCS_PORT(n, m) + 0x1DC)
198#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1E0)
199
200#define PCIE_COM_SW_RESET 0x400
201#define PCIE_COM_POWER_DOWN_CONTROL 0x404
202#define PCIE_COM_START_CONTROL 0x408
203#define PCIE_COM_DEBUG_BUS_BYTE0_INDEX 0x438
204#define PCIE_COM_DEBUG_BUS_BYTE1_INDEX 0x43C
205#define PCIE_COM_DEBUG_BUS_BYTE2_INDEX 0x440
206#define PCIE_COM_DEBUG_BUS_BYTE3_INDEX 0x444
207#define PCIE_COM_PCS_READY_STATUS 0x448
208#define PCIE_COM_DEBUG_BUS_0_STATUS 0x45C
209#define PCIE_COM_DEBUG_BUS_1_STATUS 0x460
210#define PCIE_COM_DEBUG_BUS_2_STATUS 0x464
211#define PCIE_COM_DEBUG_BUS_3_STATUS 0x468
212
213#define PCIE20_PARF_SYS_CTRL 0x00
214#define PCIE20_PARF_PM_STTS 0x24
215#define PCIE20_PARF_PCS_DEEMPH 0x34
216#define PCIE20_PARF_PCS_SWING 0x38
217#define PCIE20_PARF_PHY_CTRL 0x40
218#define PCIE20_PARF_PHY_REFCLK 0x4C
219#define PCIE20_PARF_CONFIG_BITS 0x50
220#define PCIE20_PARF_TEST_BUS 0xE4
221#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
222#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8
223#define PCIE20_PARF_LTSSM 0x1B0
224#define PCIE20_PARF_INT_ALL_STATUS 0x224
225#define PCIE20_PARF_INT_ALL_CLEAR 0x228
226#define PCIE20_PARF_INT_ALL_MASK 0x22C
227#define PCIE20_PARF_SID_OFFSET 0x234
228#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
229#define PCIE20_PARF_BDF_TRANSLATE_N 0x250
230
231#define PCIE20_ELBI_VERSION 0x00
232#define PCIE20_ELBI_SYS_CTRL 0x04
233#define PCIE20_ELBI_SYS_STTS 0x08
234
235#define PCIE20_CAP 0x70
236#define PCIE20_CAP_DEVCTRLSTATUS (PCIE20_CAP + 0x08)
237#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
238
239#define PCIE20_COMMAND_STATUS 0x04
240#define PCIE20_HEADER_TYPE 0x0C
241#define PCIE20_BUSNUMBERS 0x18
242#define PCIE20_MEMORY_BASE_LIMIT 0x20
243#define PCIE20_BRIDGE_CTRL 0x3C
244#define PCIE20_DEVICE_CONTROL_STATUS 0x78
245#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
246
247#define PCIE20_AUX_CLK_FREQ_REG 0xB40
248#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C
249#define PCIE20_ACK_N_FTS 0xff00
250
251#define PCIE20_PLR_IATU_VIEWPORT 0x900
252#define PCIE20_PLR_IATU_CTRL1 0x904
253#define PCIE20_PLR_IATU_CTRL2 0x908
254#define PCIE20_PLR_IATU_LBAR 0x90C
255#define PCIE20_PLR_IATU_UBAR 0x910
256#define PCIE20_PLR_IATU_LAR 0x914
257#define PCIE20_PLR_IATU_LTAR 0x918
258#define PCIE20_PLR_IATU_UTAR 0x91c
259
260#define PCIE20_CTRL1_TYPE_CFG0 0x04
261#define PCIE20_CTRL1_TYPE_CFG1 0x05
262
263#define PCIE20_CAP_ID 0x10
264#define L1SUB_CAP_ID 0x1E
265
266#define PCIE_CAP_PTR_OFFSET 0x34
267#define PCIE_EXT_CAP_OFFSET 0x100
268
269#define PCIE20_AER_UNCORR_ERR_STATUS_REG 0x104
270#define PCIE20_AER_CORR_ERR_STATUS_REG 0x110
271#define PCIE20_AER_ROOT_ERR_STATUS_REG 0x130
272#define PCIE20_AER_ERR_SRC_ID_REG 0x134
273
274#define RD 0
275#define WR 1
276#define MSM_PCIE_ERROR -1
277
278#define PERST_PROPAGATION_DELAY_US_MIN 1000
279#define PERST_PROPAGATION_DELAY_US_MAX 1005
280#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
281#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
282#define LINK_UP_TIMEOUT_US_MIN 5000
283#define LINK_UP_TIMEOUT_US_MAX 5100
284#define LINK_UP_CHECK_MAX_COUNT 20
285#define PHY_STABILIZATION_DELAY_US_MIN 995
286#define PHY_STABILIZATION_DELAY_US_MAX 1005
287#define POWER_DOWN_DELAY_US_MIN 10
288#define POWER_DOWN_DELAY_US_MAX 11
289#define LINKDOWN_INIT_WAITING_US_MIN 995
290#define LINKDOWN_INIT_WAITING_US_MAX 1005
291#define LINKDOWN_WAITING_US_MIN 4900
292#define LINKDOWN_WAITING_US_MAX 5100
293#define LINKDOWN_WAITING_COUNT 200
294
295#define PHY_READY_TIMEOUT_COUNT 10
296#define XMLH_LINK_UP 0x400
297#define MAX_LINK_RETRIES 5
298#define MAX_BUS_NUM 3
299#define MAX_PROP_SIZE 32
300#define MAX_RC_NAME_LEN 15
301#define MSM_PCIE_MAX_VREG 4
302#define MSM_PCIE_MAX_CLK 9
303#define MSM_PCIE_MAX_PIPE_CLK 1
304#define MAX_RC_NUM 3
305#define MAX_DEVICE_NUM 20
306#define MAX_SHORT_BDF_NUM 16
307#define PCIE_TLP_RD_SIZE 0x5
308#define PCIE_MSI_NR_IRQS 256
309#define MSM_PCIE_MAX_MSI 32
310#define MAX_MSG_LEN 80
311#define PCIE_LOG_PAGES (50)
312#define PCIE_CONF_SPACE_DW 1024
313#define PCIE_CLEAR 0xDEADBEEF
314#define PCIE_LINK_DOWN 0xFFFFFFFF
315
316#define MSM_PCIE_MAX_RESET 4
317#define MSM_PCIE_MAX_PIPE_RESET 1
318
319#define MSM_PCIE_MSI_PHY 0xa0000000
320#define PCIE20_MSI_CTRL_ADDR (0x820)
321#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
322#define PCIE20_MSI_CTRL_INTR_EN (0x828)
323#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
324#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
325#define PCIE20_MSI_CTRL_MAX 8
326
327/* PM control options */
328#define PM_IRQ 0x1
329#define PM_CLK 0x2
330#define PM_GPIO 0x4
331#define PM_VREG 0x8
332#define PM_PIPE_CLK 0x10
333#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)
334
335#ifdef CONFIG_PHYS_ADDR_T_64BIT
336#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
337#else
338#define PCIE_UPPER_ADDR(addr) (0x0)
339#endif
340#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
341
342/* Config Space Offsets */
343#define BDF_OFFSET(bus, devfn) \
344 ((bus << 24) | (devfn << 16))
345
346#define PCIE_GEN_DBG(x...) do { \
347 if (msm_pcie_debug_mask) \
348 pr_alert(x); \
349 } while (0)
350
351#define PCIE_DBG(dev, fmt, arg...) do { \
352 if ((dev) && (dev)->ipc_log_long) \
353 ipc_log_string((dev)->ipc_log_long, \
354 "DBG1:%s: " fmt, __func__, arg); \
355 if ((dev) && (dev)->ipc_log) \
356 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
357 if (msm_pcie_debug_mask) \
358 pr_alert("%s: " fmt, __func__, arg); \
359 } while (0)
360
361#define PCIE_DBG2(dev, fmt, arg...) do { \
362 if ((dev) && (dev)->ipc_log) \
363 ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
364 if (msm_pcie_debug_mask) \
365 pr_alert("%s: " fmt, __func__, arg); \
366 } while (0)
367
368#define PCIE_DBG3(dev, fmt, arg...) do { \
369 if ((dev) && (dev)->ipc_log) \
370 ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
371 if (msm_pcie_debug_mask) \
372 pr_alert("%s: " fmt, __func__, arg); \
373 } while (0)
374
375#define PCIE_DUMP(dev, fmt, arg...) do { \
376 if ((dev) && (dev)->ipc_log_dump) \
377 ipc_log_string((dev)->ipc_log_dump, \
378 "DUMP:%s: " fmt, __func__, arg); \
379 } while (0)
380
381#define PCIE_DBG_FS(dev, fmt, arg...) do { \
382 if ((dev) && (dev)->ipc_log_dump) \
383 ipc_log_string((dev)->ipc_log_dump, \
384 "DBG_FS:%s: " fmt, __func__, arg); \
385 pr_alert("%s: " fmt, __func__, arg); \
386 } while (0)
387
388#define PCIE_INFO(dev, fmt, arg...) do { \
389 if ((dev) && (dev)->ipc_log_long) \
390 ipc_log_string((dev)->ipc_log_long, \
391 "INFO:%s: " fmt, __func__, arg); \
392 if ((dev) && (dev)->ipc_log) \
393 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
394 pr_info("%s: " fmt, __func__, arg); \
395 } while (0)
396
397#define PCIE_ERR(dev, fmt, arg...) do { \
398 if ((dev) && (dev)->ipc_log_long) \
399 ipc_log_string((dev)->ipc_log_long, \
400 "ERR:%s: " fmt, __func__, arg); \
401 if ((dev) && (dev)->ipc_log) \
402 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
403 pr_err("%s: " fmt, __func__, arg); \
404 } while (0)
405
406
/* Indices into msm_pcie_dev_t.res for the memory resources named in DT. */
enum msm_pcie_res {
	MSM_PCIE_RES_PARF,	/* PARF (PCIe access/control) register space */
	MSM_PCIE_RES_PHY,	/* PHY register space */
	MSM_PCIE_RES_DM_CORE,	/* DesignWare core (DBI) register space */
	MSM_PCIE_RES_ELBI,	/* external local bus interface registers */
	MSM_PCIE_RES_CONF,	/* endpoint config access window */
	MSM_PCIE_RES_IO,	/* I/O window */
	MSM_PCIE_RES_BARS,	/* memory (BAR) window */
	MSM_PCIE_RES_TCSR,	/* top-level CSR space */
	MSM_PCIE_MAX_RES,
};

/* Indices into msm_pcie_dev_t.irq for the platform interrupts named in DT. */
enum msm_pcie_irq {
	MSM_PCIE_INT_MSI,
	MSM_PCIE_INT_A,
	MSM_PCIE_INT_B,
	MSM_PCIE_INT_C,
	MSM_PCIE_INT_D,
	MSM_PCIE_INT_PLS_PME,
	MSM_PCIE_INT_PME_LEGACY,
	MSM_PCIE_INT_PLS_ERR,
	MSM_PCIE_INT_AER_LEGACY,
	MSM_PCIE_INT_LINK_UP,
	MSM_PCIE_INT_LINK_DOWN,
	MSM_PCIE_INT_BRIDGE_FLUSH_N,
	MSM_PCIE_INT_GLOBAL_INT,
	MSM_PCIE_MAX_IRQ,
};

/*
 * Bit positions reported by the aggregated "global" interrupt
 * (PCIE20_PARF_INT_ALL_STATUS); presumably mirrors the hardware layout —
 * note the explicit start at 1 and fixed MAX of 30.
 */
enum msm_pcie_irq_event {
	MSM_PCIE_INT_EVT_LINK_DOWN = 1,
	MSM_PCIE_INT_EVT_BME,
	MSM_PCIE_INT_EVT_PM_TURNOFF,
	MSM_PCIE_INT_EVT_DEBUG,
	MSM_PCIE_INT_EVT_LTR,
	MSM_PCIE_INT_EVT_MHI_Q6,
	MSM_PCIE_INT_EVT_MHI_A7,
	MSM_PCIE_INT_EVT_DSTATE_CHANGE,
	MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
	MSM_PCIE_INT_EVT_MMIO_WRITE,
	MSM_PCIE_INT_EVT_CFG_WRITE,
	MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
	MSM_PCIE_INT_EVT_LINK_UP,
	MSM_PCIE_INT_EVT_AER_LEGACY,
	MSM_PCIE_INT_EVT_AER_ERR,
	MSM_PCIE_INT_EVT_PME_LEGACY,
	MSM_PCIE_INT_EVT_PLS_PME,
	MSM_PCIE_INT_EVT_INTD,
	MSM_PCIE_INT_EVT_INTC,
	MSM_PCIE_INT_EVT_INTB,
	MSM_PCIE_INT_EVT_INTA,
	MSM_PCIE_INT_EVT_EDMA,
	MSM_PCIE_INT_EVT_MSI_0,
	MSM_PCIE_INT_EVT_MSI_1,
	MSM_PCIE_INT_EVT_MSI_2,
	MSM_PCIE_INT_EVT_MSI_3,
	MSM_PCIE_INT_EVT_MSI_4,
	MSM_PCIE_INT_EVT_MSI_5,
	MSM_PCIE_INT_EVT_MSI_6,
	MSM_PCIE_INT_EVT_MSI_7,
	MSM_PCIE_INT_EVT_MAX = 30,
};

/* Indices into msm_pcie_dev_t.gpio. */
enum msm_pcie_gpio {
	MSM_PCIE_GPIO_PERST,	/* PERST# reset line to the endpoint */
	MSM_PCIE_GPIO_WAKE,	/* WAKE# input from the endpoint */
	MSM_PCIE_GPIO_EP,	/* optional endpoint-specific GPIO */
	MSM_PCIE_MAX_GPIO
};

/* Lifecycle state of the PCIe link for one root complex. */
enum msm_pcie_link_status {
	MSM_PCIE_LINK_DEINIT,	/* never brought up */
	MSM_PCIE_LINK_ENABLED,	/* link trained and usable */
	MSM_PCIE_LINK_DISABLED	/* link brought down after having been up */
};
482
/* gpio info structure */
struct msm_pcie_gpio_info_t {
	char *name;		/* DT property name for this GPIO */
	uint32_t num;		/* GPIO number resolved from DT */
	bool out;		/* true: configured as output */
	uint32_t on;		/* value to drive when enabling the port */
	uint32_t init;		/* initial value driven at setup */
	bool required;		/* probe fails if a required GPIO is absent */
};

/* voltage regulator info structrue */
struct msm_pcie_vreg_info_t {
	struct regulator *hdl;	/* regulator handle; NULL until acquired */
	char *name;		/* supply name used for the DT lookup */
	uint32_t max_v;		/* maximum voltage (uV); 0 = don't set */
	uint32_t min_v;		/* minimum voltage (uV); 0 = don't set */
	uint32_t opt_mode;	/* load current (uA) for optimum mode; 0 = skip */
	bool required;		/* probe fails if a required supply is absent */
};

/* reset info structure */
struct msm_pcie_reset_info_t {
	struct reset_control *hdl;	/* reset handle; NULL until acquired */
	char *name;			/* reset line name */
	bool required;			/* probe fails if absent and required */
};

/* clock info structure */
struct msm_pcie_clk_info_t {
	struct clk *hdl;	/* clock handle; NULL until acquired */
	char *name;		/* clock name used for the lookup */
	u32 freq;		/* rate to set in Hz; 0 = leave unchanged */
	bool config_mem;	/* configure NORETAIN mem/periph flags */
	bool required;		/* probe fails if a required clock is absent */
};

/* resource info structure */
struct msm_pcie_res_info_t {
	char *name;			/* platform resource name */
	struct resource *resource;	/* resolved resource */
	void __iomem *base;		/* ioremapped base, where applicable */
};

/* irq info structrue */
struct msm_pcie_irq_info_t {
	char *name;	/* platform IRQ resource name */
	uint32_t num;	/* resolved Linux IRQ number */
};

/* phy info structure */
struct msm_pcie_phy_info_t {
	u32 offset;	/* register offset within the PHY block */
	u32 val;	/* value to write */
	u32 delay;	/* delay after the write, if non-zero */
};

/* PCIe device info structure */
struct msm_pcie_device_info {
	u32 bdf;		/* bus/device/function, packed via BDF_OFFSET */
	struct pci_dev *dev;
	short short_bdf;	/* compact BDF index used for SID mapping */
	u32 sid;		/* SMMU stream ID */
	int domain;		/* PCI domain (root complex index) */
	void __iomem *conf_base;	/* config space mapping for this BDF */
	unsigned long phy_address;	/* physical address of conf_base */
	u32 dev_ctrlstts_offset;	/* offset of Device Control/Status reg */
	struct msm_pcie_register_event *event_reg;	/* client callbacks */
	bool registered;	/* client registered for events */
};
552
/*
 * msm pcie device structure: all state for one root complex. One entry of
 * msm_pcie_dev[] per RC, populated at probe time from DT and platform data.
 */
struct msm_pcie_dev_t {
	struct platform_device *pdev;
	struct pci_dev *dev;
	struct regulator *gdsc;		/* core power domain */
	struct regulator *gdsc_smmu;	/* SMMU power domain */
	struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
	struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
	struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK];
	struct msm_pcie_clk_info_t pipeclk[MSM_PCIE_MAX_PIPE_CLK];
	struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
	struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
	struct msm_pcie_irq_info_t msi[MSM_PCIE_MAX_MSI];
	struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
	struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];

	/* ioremapped register spaces (see enum msm_pcie_res) */
	void __iomem *parf;
	void __iomem *phy;
	void __iomem *elbi;
	void __iomem *dm_core;
	void __iomem *conf;
	void __iomem *bars;
	void __iomem *tcsr;

	uint32_t axi_bar_start;
	uint32_t axi_bar_end;

	struct resource *dev_mem_res;
	struct resource *dev_io_res;

	uint32_t wake_n;	/* IRQ number of the WAKE# GPIO */
	uint32_t vreg_n;
	uint32_t gpio_n;
	uint32_t parf_deemph;	/* override for PCIE20_PARF_PCS_DEEMPH */
	uint32_t parf_swing;	/* override for PCIE20_PARF_PCS_SWING */

	bool cfg_access;	/* config space may be accessed */
	spinlock_t cfg_lock;	/* guards cfg_access */
	unsigned long irqsave_flags;
	struct mutex enumerate_lock;
	struct mutex setup_lock;

	/* MSI handling */
	struct irq_domain *irq_domain;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
	uint32_t msi_gicm_addr;	/* GIC MSI target address; 0 = use PCIe core */
	uint32_t msi_gicm_base;
	bool use_msi;

	enum msm_pcie_link_status link_status;
	bool user_suspend;	/* suspend requested by client */
	bool disable_pc;	/* power collapse disabled by client */
	struct pci_saved_state *saved_state;

	struct wakeup_source ws;
	struct msm_bus_scale_pdata *bus_scale_table;
	uint32_t bus_client;

	/* link/power-management capabilities and DT-configured options */
	bool l0s_supported;
	bool l1_supported;
	bool l1ss_supported;
	bool common_clk_en;
	bool clk_power_manage_en;
	bool aux_clk_sync;
	bool aer_enable;
	bool smmu_exist;
	uint32_t smmu_sid_base;
	uint32_t n_fts;
	bool ext_ref_clk;
	bool common_phy;	/* PHY common block shared across RCs */
	uint32_t ep_latency;
	uint32_t wr_halt_size;
	uint32_t cpl_timeout;
	uint32_t current_bdf;
	short current_short_bdf;
	uint32_t perst_delay_us_min;
	uint32_t perst_delay_us_max;
	uint32_t tlp_rd_size;
	bool linkdown_panic;	/* panic on linkdown for debug */
	bool ep_wakeirq;

	uint32_t rc_idx;	/* index of this RC in msm_pcie_dev[] */
	uint32_t phy_ver;
	bool drv_ready;
	bool enumerated;
	struct work_struct handle_wake_work;
	struct mutex recovery_lock;
	spinlock_t linkdown_lock;
	spinlock_t wakeup_lock;
	spinlock_t global_irq_lock;
	spinlock_t aer_lock;
	/* debug/statistics counters */
	ulong linkdown_counter;
	ulong link_turned_on_counter;
	ulong link_turned_off_counter;
	ulong rc_corr_counter;
	ulong rc_non_fatal_counter;
	ulong rc_fatal_counter;
	ulong ep_corr_counter;
	ulong ep_non_fatal_counter;
	ulong ep_fatal_counter;
	bool suspending;
	ulong wake_counter;
	u32 num_active_ep;
	u32 num_ep;
	bool pending_ep_reg;
	u32 phy_len;		/* entries in phy_sequence */
	u32 port_phy_len;	/* entries in port_phy_sequence */
	struct msm_pcie_phy_info_t *phy_sequence;
	struct msm_pcie_phy_info_t *port_phy_sequence;
	/* shadow copies of config space, restored after power collapse */
	u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
	u32 rc_shadow[PCIE_CONF_SPACE_DW];
	bool shadow_en;
	bool bridge_found;
	struct msm_pcie_register_event *event_reg;
	unsigned int scm_dev_id;
	bool power_on;
	/* ipc_logging contexts (regular, long, dump) */
	void *ipc_log;
	void *ipc_log_long;
	void *ipc_log_dump;
	bool use_19p2mhz_aux_clk;
	bool use_pinctrl;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_sleep;
	struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM];
};
678
679
/* debug mask sys interface */
static int msm_pcie_debug_mask;
module_param_named(debug_mask, msm_pcie_debug_mask,
			int, 0644);

/* debugfs values (written via the debugfs test interface) */
static u32 rc_sel;		/* bitmask selecting which RC(s) to act on */
static u32 base_sel;		/* selected register base for rd/wr */
static u32 wr_offset;		/* register offset for debugfs writes */
static u32 wr_mask;		/* field mask for debugfs writes */
static u32 wr_value;		/* value for debugfs writes */
static ulong corr_counter_limit = 5;	/* throttle for correctable-error logs */

/* counter to keep track if common PHY needs to be configured */
static u32 num_rc_on;

/* global lock for PCIe common PHY */
static struct mutex com_phy_lock;

/* Table to track info of PCIe devices */
static struct msm_pcie_device_info
	msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];

/* PCIe driver state */
struct pcie_drv_sta {
	u32 rc_num;		/* number of RCs probed so far */
	struct mutex drv_lock;	/* serializes probe/remove across RCs */
} pcie_drv;

/* msm pcie device data */
static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
711
/*
 * Static configuration tables. Entry field order matches the corresponding
 * *_info_t structs; handles (first field) start NULL and are filled at probe.
 */

/* regulators: {hdl, name, max_v(uV), min_v(uV), opt_mode(uA), required} */
static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
	{NULL, "vreg-3.3", 0, 0, 0, false},
	{NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
	{NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
	{NULL, "vreg-cx", 0, 0, 0, false}
};

/* GPIOs: {name, num, out, on, init, required} */
static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
	{"perst-gpio", 0, 1, 0, 0, 1},
	{"wake-gpio", 0, 0, 0, 0, 0},
	{"qcom,ep-gpio", 0, 1, 1, 0, 0}
};

/* resets (per RC; all best-effort, none required) */
static struct msm_pcie_reset_info_t
msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_0_phy_reset", false}
	},
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_1_phy_reset", false}
	},
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_2_phy_reset", false}
	}
};

/* pipe reset (per RC) */
static struct msm_pcie_reset_info_t
msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
	{
		{NULL, "pcie_0_phy_pipe_reset", false}
	},
	{
		{NULL, "pcie_1_phy_pipe_reset", false}
	},
	{
		{NULL, "pcie_2_phy_pipe_reset", false}
	}
};

/* clocks (per RC): {hdl, name, freq(Hz), config_mem, required} */
static struct msm_pcie_clk_info_t
	msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
	{
	{NULL, "pcie_0_ref_clk_src", 0, false, false},
	{NULL, "pcie_0_aux_clk", 1010000, false, true},
	{NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_0_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_0_slv_axi_clk", 0, true, true},
	{NULL, "pcie_0_ldo", 0, false, true},
	{NULL, "pcie_0_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	},
	{
	{NULL, "pcie_1_ref_clk_src", 0, false, false},
	{NULL, "pcie_1_aux_clk", 1010000, false, true},
	{NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_1_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_1_slv_axi_clk", 0, true, true},
	{NULL, "pcie_1_ldo", 0, false, true},
	{NULL, "pcie_1_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	},
	{
	{NULL, "pcie_2_ref_clk_src", 0, false, false},
	{NULL, "pcie_2_aux_clk", 1010000, false, true},
	{NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_2_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_2_slv_axi_clk", 0, true, true},
	{NULL, "pcie_2_ldo", 0, false, true},
	{NULL, "pcie_2_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	}
};

/* Pipe Clocks (per RC) */
static struct msm_pcie_clk_info_t
	msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
	{
	{NULL, "pcie_0_pipe_clk", 125000000, true, true},
	},
	{
	{NULL, "pcie_1_pipe_clk", 125000000, true, true},
	},
	{
	{NULL, "pcie_2_pipe_clk", 125000000, true, true},
	}
};

/* resources: names match enum msm_pcie_res order */
static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
	{"parf", 0, 0},
	{"phy", 0, 0},
	{"dm_core", 0, 0},
	{"elbi", 0, 0},
	{"conf", 0, 0},
	{"io", 0, 0},
	{"bars", 0, 0},
	{"tcsr", 0, 0}
};

/* irqs: names match enum msm_pcie_irq order */
static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
	{"int_msi", 0},
	{"int_a", 0},
	{"int_b", 0},
	{"int_c", 0},
	{"int_d", 0},
	{"int_pls_pme", 0},
	{"int_pme_legacy", 0},
	{"int_pls_err", 0},
	{"int_aer_legacy", 0},
	{"int_pls_link_up", 0},
	{"int_pls_link_down", 0},
	{"int_bridge_flush_n", 0},
	{"int_global_int", 0}
};

/* MSIs */
static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
	{"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
	{"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
	{"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
	{"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
	{"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
	{"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
	{"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
	{"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
};
856
#ifdef CONFIG_ARM
/*
 * On ARM the PCI core hands each bus a struct pci_sys_data; this driver's
 * per-RC state hangs off its private_data field.
 */
#define PCIE_BUS_PRIV_DATA(bus) \
	(((struct pci_sys_data *)bus->sysdata)->private_data)

/* One pci_sys_data per root complex, indexed by rc_idx. */
static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];

/* Populate and return this RC's sysdata for registration with the PCI core. */
static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
	msm_pcie_sys_data[dev->rc_idx].private_data = dev;

	return &msm_pcie_sys_data[dev->rc_idx];
}

/* Map legacy INTx IRQs using the common swizzle and DT interrupt parsing. */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
}
#else
/* Without CONFIG_ARM the driver instance itself is stored as bus sysdata. */
#define PCIE_BUS_PRIV_DATA(bus) \
	(struct msm_pcie_dev_t *)(bus->sysdata)

static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	return dev;
}

/* No-op: IRQ fixup is only needed on the ARM sysdata path. */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
}
#endif
888
/*
 * Write @value to the 32-bit register at @base + @offset, then barrier so
 * the write is posted to the device before any subsequent access.
 */
static inline void msm_pcie_write_reg(void *base, u32 offset, u32 value)
{
	writel_relaxed(value, base + offset);
	/* ensure that changes propagated to the hardware */
	wmb();
}
895
896static inline void msm_pcie_write_reg_field(void *base, u32 offset,
897 const u32 mask, u32 val)
898{
899 u32 shift = find_first_bit((void *)&mask, 32);
900 u32 tmp = readl_relaxed(base + offset);
901
902 tmp &= ~mask; /* clear written bits */
903 val = tmp | (val << shift);
904 writel_relaxed(val, base + offset);
905 /* ensure that changes propagated to the hardware */
906 wmb();
907}
908
/*
 * Configure @info's clock so its core and peripheral memories are not
 * retained when the clock is gated (CLKFLAG_NORETAIN_MEM/_PERIPH), saving
 * power during power collapse. Failures are logged but not propagated:
 * both flags are attempted regardless, and callers continue either way.
 */
static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
	struct msm_pcie_clk_info_t *info)
{
	int ret;

	ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
	if (ret)
		PCIE_ERR(dev,
			"PCIe: RC%d can't configure core memory for clk %s: %d.\n",
			dev->rc_idx, info->name, ret);
	else
		PCIE_DBG2(dev,
			"PCIe: RC%d configured core memory for clk %s.\n",
			dev->rc_idx, info->name);

	ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
	if (ret)
		PCIE_ERR(dev,
			"PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
			dev->rc_idx, info->name, ret);
	else
		PCIE_DBG2(dev,
			"PCIe: RC%d configured peripheral memory for clk %s.\n",
			dev->rc_idx, info->name);
}
934
935#if defined(CONFIG_ARCH_FSM9010)
936#define PCIE20_PARF_PHY_STTS 0x3c
937#define PCIE2_PHY_RESET_CTRL 0x44
938#define PCIE20_PARF_PHY_REFCLK_CTRL2 0xa0
939#define PCIE20_PARF_PHY_REFCLK_CTRL3 0xa4
940#define PCIE20_PARF_PCS_SWING_CTRL1 0x88
941#define PCIE20_PARF_PCS_SWING_CTRL2 0x8c
942#define PCIE20_PARF_PCS_DEEMPH1 0x74
943#define PCIE20_PARF_PCS_DEEMPH2 0x78
944#define PCIE20_PARF_PCS_DEEMPH3 0x7c
945#define PCIE20_PARF_CONFIGBITS 0x84
946#define PCIE20_PARF_PHY_CTRL3 0x94
947#define PCIE20_PARF_PCS_CTRL 0x80
948
949#define TX_AMP_VAL 127
950#define PHY_RX0_EQ_GEN1_VAL 0
951#define PHY_RX0_EQ_GEN2_VAL 4
952#define TX_DEEMPH_GEN1_VAL 24
953#define TX_DEEMPH_GEN2_3_5DB_VAL 24
954#define TX_DEEMPH_GEN2_6DB_VAL 34
955#define PHY_TX0_TERM_OFFST_VAL 0
956
/* No-op on FSM9010: this PHY variant has no debug dump support. */
static inline void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
}
960
961static inline void pcie20_phy_reset(struct msm_pcie_dev_t *dev, uint32_t assert)
962{
963 msm_pcie_write_reg_field(dev->phy, PCIE2_PHY_RESET_CTRL,
964 BIT(0), (assert) ? 1 : 0);
965}
966
/*
 * Program the FSM9010 28LP SNS PHY for a 100MHz reference clock.
 * The PHY is held in SW reset while amplitude, de-emphasis, Rx
 * equalization, termination offset and refclk source are configured,
 * then released. Registers are only written when their current value
 * differs from the target, to avoid redundant MMIO writes.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d: Initializing 28LP SNS phy - 100MHz\n",
		dev->rc_idx);

	/* Assert Phy SW Reset while programming (de-asserted at the end) */
	pcie20_phy_reset(dev, 1);

	/* Program SSP ENABLE */
	if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL2) & BIT(0))
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2,
			BIT(0), 0);
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL3) &
		BIT(0)) == 0)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL3,
			BIT(0), 1);
	/* Program Tx Amplitude */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL1) &
		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			TX_AMP_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL1,
			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_AMP_VAL);
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL2) &
		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			TX_AMP_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL2,
			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_AMP_VAL);
	/* Program De-Emphasis */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH1) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			TX_DEEMPH_GEN2_6DB_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH1,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN2_6DB_VAL);

	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH2) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			TX_DEEMPH_GEN2_3_5DB_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH2,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN2_3_5DB_VAL);

	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH3) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			TX_DEEMPH_GEN1_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH3,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN1_VAL);

	/* Program Rx_Eq */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_CONFIGBITS) &
		(BIT(2)|BIT(1)|BIT(0))) != PHY_RX0_EQ_GEN1_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_CONFIGBITS,
			BIT(2)|BIT(1)|BIT(0), PHY_RX0_EQ_GEN1_VAL);

	/* Program Tx0_term_offset */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_CTRL3) &
		(BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			PHY_TX0_TERM_OFFST_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_CTRL3,
			BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			PHY_TX0_TERM_OFFST_VAL);

	/* Program REF_CLK source */
	msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2, BIT(1),
		(dev->ext_ref_clk) ? 1 : 0);
	/* disable Tx2Rx Loopback */
	if (readl_relaxed(dev->phy + PCIE20_PARF_PCS_CTRL) & BIT(1))
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_CTRL,
			BIT(1), 0);
	/* De-assert Phy SW Reset */
	pcie20_phy_reset(dev, 0);
}
1042
1043static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1044{
1045
1046 /* read PCIE20_PARF_PHY_STTS twice */
1047 readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS);
1048 if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS) & BIT(0))
1049 return false;
1050 else
1051 return true;
1052}
1053#else
/*
 * Program the four PHY test-control registers with the given selector
 * values, then log both the selectors and the four resulting debug-bus
 * status registers.  Used repeatedly by pcie_phy_dump() to walk the
 * PHY's internal test bus.
 */
static void pcie_phy_dump_test_cntrl(struct msm_pcie_dev_t *dev,
					u32 cntrl4_val, u32 cntrl5_val,
					u32 cntrl6_val, u32 cntrl7_val)
{
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL4(dev->rc_idx, dev->common_phy), cntrl4_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL5(dev->rc_idx, dev->common_phy), cntrl5_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL6(dev->rc_idx, dev->common_phy), cntrl6_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL7(dev->rc_idx, dev->common_phy), cntrl7_val);

	/* Read back the selectors so the log pairs each with its status. */
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL4: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL4(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL5: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL5(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL6: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL6(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL7: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL7(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_0_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_1_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_2_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_3_STATUS(dev->rc_idx,
				dev->common_phy)));
}
1108
/*
 * Dump the QMP PHY state for debugging: walks the PCS test bus, the
 * per-lane TX debug bus, the MISC debug bus (PHY v1.x only), the common
 * QSERDES debug bus and, for a common PHY, the PCIE_COM debug bus;
 * finally dumps the whole PHY register space in 32-byte rows.
 * Not supported on PHY v2.0+.
 */
static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;
	u32 write_val;

	if (dev->phy_ver >= 0x20) {
		PCIE_DUMP(dev, "PCIe: RC%d PHY dump is not supported\n",
			dev->rc_idx);
		return;
	}

	PCIE_DUMP(dev, "PCIe: RC%d PHY testbus\n", dev->rc_idx);

	/* PCS test-bus selectors 0x18-0x23 */
	pcie_phy_dump_test_cntrl(dev, 0x18, 0x19, 0x1A, 0x1B);
	pcie_phy_dump_test_cntrl(dev, 0x1C, 0x1D, 0x1E, 0x1F);
	pcie_phy_dump_test_cntrl(dev, 0x20, 0x21, 0x22, 0x23);

	/* Per-lane TX debug bus: selectors 1..3, sampled via PCS 0x30-0x33 */
	for (i = 0; i < 3; i++) {
		write_val = 0x1 + i;
		msm_pcie_write_reg(dev->phy,
			QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
				dev->common_phy), write_val);
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_TX_N_DEBUG_BUS_SEL: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
					dev->common_phy)));

		pcie_phy_dump_test_cntrl(dev, 0x30, 0x31, 0x32, 0x33);
	}

	/* Restore test-control registers to zero */
	pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);

	/* Extra MISC debug bus dump available only on PHY v1.x */
	if (dev->phy_ver >= 0x10 && dev->phy_ver < 0x20) {
		pcie_phy_dump_test_cntrl(dev, 0x01, 0x02, 0x03, 0x0A);
		pcie_phy_dump_test_cntrl(dev, 0x0E, 0x0F, 0x12, 0x13);
		pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);

		/* Two passes selecting byte indices 1-4 then 5-8 */
		for (i = 0; i < 8; i += 4) {
			write_val = 0x1 + i;
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(dev->rc_idx,
					dev->common_phy), write_val);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 1);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 2);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 3);

			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_0_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_0_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_1_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_1_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_2_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_2_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_3_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_3_STATUS(
						dev->rc_idx, dev->common_phy)));
		}

		/* Reset MISC debug-bus byte indices */
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
				dev->rc_idx, dev->common_phy), 0);
	}

	/* Common QSERDES debug bus: selectors 2 and 3 */
	for (i = 0; i < 2; i++) {
		write_val = 0x2 + i;

		msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL,
			write_val);

		PCIE_DUMP(dev,
			"PCIe: RC%d to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS0: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS1: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS2: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS3: 0x%x\n\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3));
	}

	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0);

	/* Common-PHY PCIE_COM debug bus (only when a shared PHY exists) */
	if (dev->common_phy) {
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
			0x01);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE1_INDEX,
			0x02);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE2_INDEX,
			0x03);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE3_INDEX,
			0x04);

		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE1_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE2_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE3_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_0_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_1_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_1_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_2_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_2_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_3_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_3_STATUS));

		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
			0x05);

		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_0_STATUS));
	}

	/* Raw dump of the entire PHY register region, 8 words per line */
	size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->phy + i),
			readl_relaxed(dev->phy + (i + 4)),
			readl_relaxed(dev->phy + (i + 8)),
			readl_relaxed(dev->phy + (i + 12)),
			readl_relaxed(dev->phy + (i + 16)),
			readl_relaxed(dev->phy + (i + 20)),
			readl_relaxed(dev->phy + (i + 24)),
			readl_relaxed(dev->phy + (i + 28)));
	}
}
1338
1339#ifdef CONFIG_ARCH_MDMCALIFORNIUM
/*
 * Initialize the MDM 14nm QMP PHY (19.2MHz ref clock, common-mode clock,
 * SSC enabled).  The sequence asserts PHY software reset, powers up the
 * PCS, programs the QSERDES common PLL, SSC, per-lane Tx/Rx tuning and
 * PCS timing registers, then releases reset and starts the PHY.
 * The register write order follows the hardware programming sequence and
 * must not be rearranged.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	u8 common_phy;

	PCIE_DBG(dev,
		"RC%d: Initializing MDM 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
		dev->rc_idx);

	if (dev->common_phy)
		common_phy = 1;
	else
		common_phy = 0;

	/* Hold the PHY in SW reset and power up the PCS block */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x03);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy), 0x06);

	/* Common PLL configuration */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);

	/* Select sysclk source based on the TCSR PHY clock scheme, if mapped */
	if (dev->tcsr) {
		PCIE_DBG(dev, "RC%d: TCSR PHY clock scheme is 0x%x\n",
			dev->rc_idx, readl_relaxed(dev->tcsr));

		if (readl_relaxed(dev->tcsr) & (BIT(1) | BIT(0)))
			msm_pcie_write_reg(dev->phy,
				QSERDES_COM_SYSCLK_EN_SEL, 0x0A);
		else
			msm_pcie_write_reg(dev->phy,
				QSERDES_COM_SYSCLK_EN_SEL, 0x04);
	}

	/* PLL dividers, lock comparators and loop gain */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x0D);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x04);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);

	/* Spread-spectrum clocking parameters */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
		common_phy), 0x45);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);

	/* Per-lane Tx configuration */
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_RES_CODE_LANE_OFFSET(dev->rc_idx, common_phy),
		0x02);
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_RCV_DETECT_LVL_2(dev->rc_idx, common_phy),
		0x12);

	/* Per-lane Rx signal detect, equalization and CDR tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
		0x1C);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
		0x14);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
		0xDB);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
		common_phy),
		0x4B);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
		0x04);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);

	/* PCS timing, wakeup delays and de-emphasis settings */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_OSC_DTCT_ACTIONS(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
		0x73);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
		0x99);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TXDEEMPH_M6DB_V0(dev->rc_idx, common_phy),
		0x15);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
		0x0E);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_SIGDET_CNTRL(dev->rc_idx, common_phy),
		0x07);

	/* Release reset and start the PHY */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
		0x03);
}
1500
/* No-op on MDM: port-level PCS init is covered by pcie_phy_init() above. */
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
}
1504
1505static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1506{
1507 if (readl_relaxed(dev->phy +
1508 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) & BIT(6))
1509 return false;
1510 else
1511 return true;
1512}
1513#else
/*
 * Initialize the 14nm QMP PHY (19.2MHz ref clock, common-mode clock,
 * SSC enabled).  If device tree supplies an explicit programming
 * sequence (dev->phy_sequence), that sequence is applied verbatim and
 * the hard-coded defaults below are skipped.  Register write order
 * follows the hardware programming sequence and must not be rearranged.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	int i;
	struct msm_pcie_phy_info_t *phy_seq;

	PCIE_DBG(dev,
		"RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
		dev->rc_idx);

	/* Device-tree-provided sequence takes precedence over the defaults */
	if (dev->phy_sequence) {
		i = dev->phy_len;
		phy_seq = dev->phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
		return;
	}

	if (dev->common_phy)
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0x01);

	/* Common PLL / clock configuration */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1C);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x42);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x04);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
	/* Spread-spectrum clocking parameters */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);

	/* PHY v3 overrides for clock select and rescode divider */
	if (dev->phy_ver == 0x3) {
		msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
		msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x40);
	}

	/* For a shared PHY, release common reset and start it */
	if (dev->common_phy) {
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x00);
		msm_pcie_write_reg(dev->phy, PCIE_COM_START_CONTROL, 0x03);
	}
}
1595
/*
 * Per-port PCS/lane initialization for the 14nm QMP PHY.  Skipped
 * entirely on PHY v2.0+.  A device-tree-provided sequence
 * (dev->port_phy_sequence) overrides the hard-coded defaults.
 * Ends by powering up the port, waiting the mandated power-down delay,
 * releasing SW reset and starting the port state machine.
 */
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
	int i;
	struct msm_pcie_phy_info_t *phy_seq;
	u8 common_phy;

	if (dev->phy_ver >= 0x20)
		return;

	PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);

	if (dev->common_phy)
		common_phy = 1;
	else
		common_phy = 0;

	/* Device-tree-provided sequence takes precedence over the defaults */
	if (dev->port_phy_sequence) {
		i = dev->port_phy_len;
		phy_seq = dev->port_phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
		return;
	}

	/* Lane Tx configuration */
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
		common_phy), 0x45);
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy),
		0x06);

	/* Lane Rx signal detect, equalization and CDR tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
		0x1C);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
		0x17);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
		0xDB);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_BAND(dev->rc_idx, common_phy),
		0x18);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
		0x04);
	/* PCS timing and power-state configuration */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_RX_IDLE_DTCT_CNTRL(dev->rc_idx, common_phy),
		0x4C);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
		0x05);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
		common_phy), 0x4B);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
		0x14);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
		0x05);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x02);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_STATE_CONFIG4(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_STATE_CONFIG1(dev->rc_idx, common_phy),
		0xA3);

	/* PHY v3 overrides for signal-detect level and -3.5dB de-emphasis */
	if (dev->phy_ver == 0x3) {
		msm_pcie_write_reg(dev->phy,
			QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
			0x19);

		msm_pcie_write_reg(dev->phy,
			PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
			0x0E);
	}

	/* Power up the port and wait the required settling delay */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x03);
	usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);

	/* Release SW reset and start the port */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
		0x0A);
}
1712
1713static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1714{
1715 if (dev->phy_ver >= 0x20) {
1716 if (readl_relaxed(dev->phy +
1717 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) &
1718 BIT(6))
1719 return false;
1720 else
1721 return true;
1722 }
1723
1724 if (!(readl_relaxed(dev->phy + PCIE_COM_PCS_READY_STATUS) & 0x1))
1725 return false;
1726 else
1727 return true;
1728}
1729#endif
1730#endif
1731
1732static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
1733{
1734 int ret, scm_ret;
1735
1736 if (!dev) {
1737 pr_err("PCIe: the input pcie dev is NULL.\n");
1738 return -ENODEV;
1739 }
1740
1741 ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
1742 if (ret || scm_ret) {
1743 PCIE_ERR(dev,
1744 "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
1745 dev->rc_idx, ret, scm_ret);
1746 return ret ? ret : -EINVAL;
1747 }
1748
1749 return 0;
1750}
1751
1752static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
1753 u32 offset)
1754{
1755 if (offset % 4) {
1756 PCIE_ERR(dev,
1757 "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
1758 dev->rc_idx, offset);
1759 return MSM_PCIE_ERROR;
1760 }
1761
1762 return 0;
1763}
1764
/*
 * Confirm the PCIe link is genuinely up.
 *
 * @check_sw_stts: also require the driver's software link state to be
 *                 MSM_PCIE_LINK_ENABLED.
 * @check_ep:      also read the endpoint's vendor/device ID word at
 *                 @ep_conf to verify the EP responds.
 *
 * Returns true only when every requested check passes.  A config read
 * returning PCIE_LINK_DOWN (all-ones) indicates the device is no longer
 * responding even though the link-status bit may still read as up.
 */
static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
				bool check_sw_stts,
				bool check_ep,
				void __iomem *ep_conf)
{
	u32 val;

	if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
			dev->rc_idx);
		return false;
	}

	/*
	 * dm_core + 0x80 / BIT(29): hardware link-up indication —
	 * presumably the DBI link control/status region; confirm against
	 * the controller register map if this offset is ever changed.
	 */
	if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
			dev->rc_idx);
		return false;
	}

	/* Offset 0 of config space holds the vendor/device ID word. */
	val = readl_relaxed(dev->dm_core);
	PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
		dev->rc_idx, val);
	if (val == PCIE_LINK_DOWN) {
		PCIE_ERR(dev,
			"PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
			dev->rc_idx, dev->rc_idx, val);
		return false;
	}

	if (check_ep) {
		val = readl_relaxed(ep_conf);
		PCIE_DBG(dev,
			"PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
			dev->rc_idx, val);
		if (val == PCIE_LINK_DOWN) {
			PCIE_ERR(dev,
				"PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
				dev->rc_idx, dev->rc_idx, val);
			return false;
		}
	}

	return true;
}
1809
/*
 * Restore shadowed configuration space after a link recovery.
 *
 * @rc: true restores only the root complex (from dev->rc_shadow);
 *      false walks the endpoint table and restores each EP whose link
 *      is confirmed up (from dev->ep_shadow[i]).
 *
 * Shadow entries equal to PCIE_CLEAR are sentinels for "never written"
 * and are skipped.  Registers are written back in descending dword
 * order.  For endpoints, the restored state is also captured with
 * pci_save_state().
 */
static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
{
	int i, j;
	u32 val = 0;
	u32 *shadow;
	void *cfg = dev->conf;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		/* EP table is packed: a zero BDF marks the end. */
		if (!rc && !dev->pcidev_table[i].bdf)
			break;
		if (rc) {
			cfg = dev->dm_core;
			shadow = dev->rc_shadow;
		} else {
			/* Skip endpoints whose link cannot be confirmed. */
			if (!msm_pcie_confirm_linkup(dev, false, true,
				dev->pcidev_table[i].conf_base))
				continue;

			shadow = dev->ep_shadow[i];
			PCIE_DBG(dev,
				"PCIe Device: %02x:%02x.%01x\n",
				dev->pcidev_table[i].bdf >> 24,
				dev->pcidev_table[i].bdf >> 19 & 0x1f,
				dev->pcidev_table[i].bdf >> 16 & 0x07);
		}
		/* Write back highest dword first. */
		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
			val = shadow[j];
			if (val != PCIE_CLEAR) {
				PCIE_DBG3(dev,
					"PCIe: before recovery:cfg 0x%x:0x%x\n",
					j * 4, readl_relaxed(cfg + j * 4));
				PCIE_DBG3(dev,
					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
					j, j * 4, val);
				writel_relaxed(val, cfg + j * 4);
				/* ensure changes propagated to the hardware */
				wmb();
				PCIE_DBG3(dev,
					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
					j * 4, readl_relaxed(cfg + j * 4));
			}
		}
		/* RC restore handles exactly one device. */
		if (rc)
			break;

		pci_save_state(dev->pcidev_table[i].dev);
		/* EP config spaces are laid out 4K apart. */
		cfg += SZ_4K;
	}
}
1859
1860static void msm_pcie_write_mask(void __iomem *addr,
1861 uint32_t clear_mask, uint32_t set_mask)
1862{
1863 uint32_t val;
1864
1865 val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
1866 writel_relaxed(val, addr);
1867 wmb(); /* ensure data is written to hardware register */
1868}
1869
/*
 * Dump PARF (PCIe auxiliary register file) state for debugging: first
 * walks the PARF test bus by cycling the selector field in SYS_CTRL
 * (restoring the original value afterwards), then dumps the entire PARF
 * register region in 32-byte rows.
 */
static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;
	u32 original;

	PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);

	original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
	/* Test-bus selector lives in bits [23:16] of SYS_CTRL. */
	for (i = 1; i <= 0x1A; i++) {
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
				0xFF0000, i << 16);
		PCIE_DUMP(dev,
			"RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
			readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
	}
	writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);

	PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);

	size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->parf + i),
			readl_relaxed(dev->parf + (i + 4)),
			readl_relaxed(dev->parf + (i + 8)),
			readl_relaxed(dev->parf + (i + 12)),
			readl_relaxed(dev->parf + (i + 16)),
			readl_relaxed(dev->parf + (i + 20)),
			readl_relaxed(dev->parf + (i + 24)),
			readl_relaxed(dev->parf + (i + 28)));
	}
}
1906
1907static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
1908{
1909 PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
1910 dev->rc_idx, dev->enumerated ? "" : "not");
1911 PCIE_DBG_FS(dev, "PCIe: link is %s\n",
1912 (dev->link_status == MSM_PCIE_LINK_ENABLED)
1913 ? "enabled" : "disabled");
1914 PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
1915 dev->cfg_access ? "" : "not");
1916 PCIE_DBG_FS(dev, "use_msi is %d\n",
1917 dev->use_msi);
1918 PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
1919 dev->use_pinctrl);
1920 PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
1921 dev->use_19p2mhz_aux_clk);
1922 PCIE_DBG_FS(dev, "user_suspend is %d\n",
1923 dev->user_suspend);
1924 PCIE_DBG_FS(dev, "num_ep: %d\n",
1925 dev->num_ep);
1926 PCIE_DBG_FS(dev, "num_active_ep: %d\n",
1927 dev->num_active_ep);
1928 PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
1929 dev->pending_ep_reg ? "true" : "false");
1930 PCIE_DBG_FS(dev, "phy_len is %d",
1931 dev->phy_len);
1932 PCIE_DBG_FS(dev, "port_phy_len is %d",
1933 dev->port_phy_len);
1934 PCIE_DBG_FS(dev, "disable_pc is %d",
1935 dev->disable_pc);
1936 PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
1937 dev->l0s_supported ? "" : "not");
1938 PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
1939 dev->l1_supported ? "" : "not");
1940 PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
1941 dev->l1ss_supported ? "" : "not");
1942 PCIE_DBG_FS(dev, "common_clk_en is %d\n",
1943 dev->common_clk_en);
1944 PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
1945 dev->clk_power_manage_en);
1946 PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
1947 dev->aux_clk_sync);
1948 PCIE_DBG_FS(dev, "AER is %s enable\n",
1949 dev->aer_enable ? "" : "not");
1950 PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
1951 dev->ext_ref_clk);
1952 PCIE_DBG_FS(dev, "ep_wakeirq is %d\n",
1953 dev->ep_wakeirq);
1954 PCIE_DBG_FS(dev, "phy_ver is %d\n",
1955 dev->phy_ver);
1956 PCIE_DBG_FS(dev, "drv_ready is %d\n",
1957 dev->drv_ready);
1958 PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
1959 dev->linkdown_panic);
1960 PCIE_DBG_FS(dev, "the link is %s suspending\n",
1961 dev->suspending ? "" : "not");
1962 PCIE_DBG_FS(dev, "shadow is %s enabled\n",
1963 dev->shadow_en ? "" : "not");
1964 PCIE_DBG_FS(dev, "the power of RC is %s on\n",
1965 dev->power_on ? "" : "not");
1966 PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
1967 dev->msi_gicm_addr);
1968 PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
1969 dev->msi_gicm_base);
1970 PCIE_DBG_FS(dev, "bus_client: %d\n",
1971 dev->bus_client);
1972 PCIE_DBG_FS(dev, "current short bdf: %d\n",
1973 dev->current_short_bdf);
1974 PCIE_DBG_FS(dev, "smmu does %s exist\n",
1975 dev->smmu_exist ? "" : "not");
1976 PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
1977 dev->smmu_sid_base);
1978 PCIE_DBG_FS(dev, "n_fts: %d\n",
1979 dev->n_fts);
1980 PCIE_DBG_FS(dev, "common_phy: %d\n",
1981 dev->common_phy);
1982 PCIE_DBG_FS(dev, "ep_latency: %dms\n",
1983 dev->ep_latency);
1984 PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
1985 dev->wr_halt_size);
1986 PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
1987 dev->cpl_timeout);
1988 PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
1989 dev->current_bdf);
1990 PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
1991 dev->perst_delay_us_min);
1992 PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
1993 dev->perst_delay_us_max);
1994 PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
1995 dev->tlp_rd_size);
1996 PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
1997 dev->rc_corr_counter);
1998 PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
1999 dev->rc_non_fatal_counter);
2000 PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
2001 dev->rc_fatal_counter);
2002 PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
2003 dev->ep_corr_counter);
2004 PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
2005 dev->ep_non_fatal_counter);
2006 PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
2007 dev->ep_fatal_counter);
2008 PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
2009 dev->linkdown_counter);
2010 PCIE_DBG_FS(dev, "wake_counter: %lu\n",
2011 dev->wake_counter);
2012 PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
2013 dev->link_turned_on_counter);
2014 PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
2015 dev->link_turned_off_counter);
2016}
2017
2018static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
2019{
2020 int i, j;
2021 u32 val = 0;
2022 u32 *shadow;
2023
2024 for (i = 0; i < MAX_DEVICE_NUM; i++) {
2025 if (!rc && !dev->pcidev_table[i].bdf)
2026 break;
2027 if (rc) {
2028 shadow = dev->rc_shadow;
2029 } else {
2030 shadow = dev->ep_shadow[i];
2031 PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
2032 dev->pcidev_table[i].bdf >> 24,
2033 dev->pcidev_table[i].bdf >> 19 & 0x1f,
2034 dev->pcidev_table[i].bdf >> 16 & 0x07);
2035 }
2036 for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
2037 val = shadow[j];
2038 if (val != PCIE_CLEAR) {
2039 PCIE_DBG_FS(dev,
2040 "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
2041 j, j * 4, val);
2042 }
2043 }
2044 if (rc)
2045 break;
2046 }
2047}
2048
/*
 * msm_pcie_sel_debug_testcase - execute one debugfs-selected test case.
 * @dev:      root complex to operate on.
 * @testcase: test number written to the "case" debugfs node:
 *            0  show driver status           1  disable link
 *            2  enable link + recover cfg    3  bounce link + recover cfg
 *            4  dump RC/EP shadow registers  5/6  disable/enable L0s
 *            7/8  disable/enable L1          9/10 disable/enable L1ss
 *            11 enumerate the RC             12 masked register write
 *            13 dump all registers of the selected base
 *
 * Cases 12 and 13 consume the file-scope debugfs knobs base_sel, wr_offset,
 * wr_mask and wr_value.  Register writes here are raw debug pokes; the
 * shadow copies are refreshed when shadow_en is set so a later restore does
 * not undo the change.
 */
static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
					u32 testcase)
{
	int ret, i;
	u32 base_sel_size = 0;
	u32 val = 0;
	u32 current_offset = 0;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	/*
	 * Cases 5-10 poke the EP's PCIe capability block; walk the standard
	 * capability linked list in the EP config space to locate it first.
	 */
	if (testcase >= 5 && testcase <= 10) {
		current_offset =
			readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xff) == PCIE20_CAP_ID) {
				/* +0x10: Link Control/Status register */
				ep_link_ctrlstts_offset = current_offset +
								0x10;
				/* +0x28: Device Control 2/Status 2 register */
				ep_dev_ctrl2stts2_offset = current_offset +
								0x28;
				break;
			}
			/* next capability pointer is in bits [15:8] */
			current_offset = (val >> 8) & 0xff;
		}

		if (!ep_link_ctrlstts_offset)
			PCIE_DBG(dev,
				"RC%d endpoint does not support PCIe capability registers\n",
				dev->rc_idx);
		else
			PCIE_DBG(dev,
				"RC%d: ep_link_ctrlstts_offset: 0x%x\n",
				dev->rc_idx, ep_link_ctrlstts_offset);
	}

	switch (testcase) {
	case 0: /* output status */
		PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
			dev->rc_idx);
		msm_pcie_show_status(dev);
		break;
	case 1: /* disable link */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
				__func__);
		break;
	case 2: /* enable link and recover config space for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: enable link and recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 3: /*
		 * disable and enable link, recover config space for
		 * RC and EP
		 */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 4: /* dump shadow registers for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping RC shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, true);

		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping EP shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, false);
		break;
	case 5: /* disable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
			dev->rc_idx);
		/* BIT(0) of Link Control = ASPM L0s enable (PCIe spec) */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(0), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(0), 0);
		if (dev->shadow_en) {
			/* keep the restore copies in sync with hardware */
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 6: /* enable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(0));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 7: /* disable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
			dev->rc_idx);
		/* BIT(1) of Link Control = ASPM L1 enable (PCIe spec) */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(1), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(1), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 8: /* enable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(1));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 9: /* disable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
			dev->rc_idx);
		/*
		 * Walk the extended capability list for the L1 PM Substates
		 * block; the next pointer lives in bits [31:20].
		 */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				/* +0x8: L1SS Control 1 register */
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
				dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* clear the four L1SS enable bits on both ends */
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_L1SUB_CONTROL1,
					0xf, 0);
		/* BIT(10) of Device Control 2: LTR mechanism enable */
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					BIT(10), 0);
		msm_pcie_write_reg_field(dev->conf,
					ep_l1sub_ctrl1_offset,
					0xf, 0);
		msm_pcie_write_mask(dev->conf +
					ep_dev_ctrl2stts2_offset,
					BIT(10), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 10: /* enable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
			dev->rc_idx);
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				/* +0x4: L1SS Capabilities register */
				ep_l1sub_cap_reg1_offset =
						current_offset + 0x4;
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		val = readl_relaxed(dev->conf +
				ep_l1sub_cap_reg1_offset);

		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
			val);
		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* only enable the substates the EP advertises support for */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_L1SUB_CONTROL1,
					0xf, val);
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0, BIT(10));
		msm_pcie_write_reg_field(dev->conf,
					ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf +
					ep_dev_ctrl2stts2_offset,
					0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 11: /* enumerate PCIe */
		PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
			dev->rc_idx);
		if (dev->enumerated)
			PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
				dev->rc_idx);
		else {
			if (!msm_pcie_enumerate(dev->rc_idx))
				PCIE_DBG_FS(dev,
					"PCIe: RC%d is successfully enumerated\n",
					dev->rc_idx);
			else
				PCIE_DBG_FS(dev,
					"PCIe: RC%d enumeration failed\n",
					dev->rc_idx);
		}
		break;
	case 12: /* write a value to a register */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: writing a value to a register\n\n",
			dev->rc_idx);

		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		}

		PCIE_DBG_FS(dev,
			"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
			dev->res[base_sel - 1].name,
			dev->res[base_sel - 1].base,
			wr_offset, wr_mask, wr_value);

		base_sel_size = resource_size(dev->res[base_sel - 1].resource);

		/* reject offsets past the end of the region or misaligned */
		if (wr_offset > base_sel_size - 4 ||
			msm_pcie_check_align(dev, wr_offset))
			PCIE_DBG_FS(dev,
				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
				dev->rc_idx, wr_offset, base_sel_size - 4);
		else
			msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
				wr_offset, wr_mask, wr_value);

		break;
	case 13: /* dump all registers of base_sel */
		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
			pcie_parf_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
			pcie_phy_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
			/* only the first 4K of config space is dumped */
			base_sel_size = 0x1000;
		} else {
			base_sel_size = resource_size(
				dev->res[base_sel - 1].resource);
		}

		PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
			dev->res[base_sel - 1].name, dev->rc_idx);

		/* eight words per output line */
		for (i = 0; i < base_sel_size; i += 32) {
			PCIE_DBG_FS(dev,
			"0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			i, readl_relaxed(dev->res[base_sel - 1].base + i),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
		}
		break;
	default:
		PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
		break;
	}
}
2470
2471int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
2472 u32 offset, u32 mask, u32 value)
2473{
2474 int ret = 0;
2475 struct msm_pcie_dev_t *pdev = NULL;
2476
2477 if (!dev) {
2478 pr_err("PCIe: the input pci dev is NULL.\n");
2479 return -ENODEV;
2480 }
2481
2482 if (option == 12 || option == 13) {
2483 if (!base || base > 5) {
2484 PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
2485 PCIE_DBG_FS(pdev,
2486 "PCIe: base_sel is still 0x%x\n", base_sel);
2487 return -EINVAL;
2488 }
2489
2490 base_sel = base;
2491 PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
2492
2493 if (option == 12) {
2494 wr_offset = offset;
2495 wr_mask = mask;
2496 wr_value = value;
2497
2498 PCIE_DBG_FS(pdev,
2499 "PCIe: wr_offset is now 0x%x\n", wr_offset);
2500 PCIE_DBG_FS(pdev,
2501 "PCIe: wr_mask is now 0x%x\n", wr_mask);
2502 PCIE_DBG_FS(pdev,
2503 "PCIe: wr_value is now 0x%x\n", wr_value);
2504 }
2505 }
2506
2507 pdev = PCIE_BUS_PRIV_DATA(dev->bus);
2508 rc_sel = 1 << pdev->rc_idx;
2509
2510 msm_pcie_sel_debug_testcase(pdev, option);
2511
2512 return ret;
2513}
2514EXPORT_SYMBOL(msm_pcie_debug_info);
2515
2516#ifdef CONFIG_DEBUG_FS
2517static struct dentry *dent_msm_pcie;
2518static struct dentry *dfile_rc_sel;
2519static struct dentry *dfile_case;
2520static struct dentry *dfile_base_sel;
2521static struct dentry *dfile_linkdown_panic;
2522static struct dentry *dfile_wr_offset;
2523static struct dentry *dfile_wr_mask;
2524static struct dentry *dfile_wr_value;
2525static struct dentry *dfile_ep_wakeirq;
2526static struct dentry *dfile_aer_enable;
2527static struct dentry *dfile_corr_counter_limit;
2528
2529static u32 rc_sel_max;
2530
2531static ssize_t msm_pcie_cmd_debug(struct file *file,
2532 const char __user *buf,
2533 size_t count, loff_t *ppos)
2534{
2535 unsigned long ret;
2536 char str[MAX_MSG_LEN];
2537 unsigned int testcase = 0;
2538 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002539 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002540
Tony Truongfdbd5672017-01-06 16:23:14 -08002541 memset(str, 0, size);
2542 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002543 if (ret)
2544 return -EFAULT;
2545
Tony Truongfdbd5672017-01-06 16:23:14 -08002546 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002547 testcase = (testcase * 10) + (str[i] - '0');
2548
2549 if (!rc_sel)
2550 rc_sel = 1;
2551
2552 pr_alert("PCIe: TEST: %d\n", testcase);
2553
2554 for (i = 0; i < MAX_RC_NUM; i++) {
2555 if (!((rc_sel >> i) & 0x1))
2556 continue;
2557 msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
2558 }
2559
2560 return count;
2561}
2562
/* file_operations wiring for the "case" debugfs node (write-only) */
const struct file_operations msm_pcie_cmd_debug_ops = {
	.write = msm_pcie_cmd_debug,
};
2566
2567static ssize_t msm_pcie_set_rc_sel(struct file *file,
2568 const char __user *buf,
2569 size_t count, loff_t *ppos)
2570{
2571 unsigned long ret;
2572 char str[MAX_MSG_LEN];
2573 int i;
2574 u32 new_rc_sel = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002575 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002576
Tony Truongfdbd5672017-01-06 16:23:14 -08002577 memset(str, 0, size);
2578 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002579 if (ret)
2580 return -EFAULT;
2581
Tony Truongfdbd5672017-01-06 16:23:14 -08002582 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002583 new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
2584
2585 if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
2586 pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
2587 pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
2588 } else {
2589 rc_sel = new_rc_sel;
2590 pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
2591 }
2592
2593 pr_alert("PCIe: the following RC(s) will be tested:\n");
2594 for (i = 0; i < MAX_RC_NUM; i++) {
2595 if (!rc_sel) {
2596 pr_alert("RC %d\n", i);
2597 break;
2598 } else if (rc_sel & (1 << i)) {
2599 pr_alert("RC %d\n", i);
2600 }
2601 }
2602
2603 return count;
2604}
2605
/* file_operations wiring for the "rc_sel" debugfs node (write-only) */
const struct file_operations msm_pcie_rc_sel_ops = {
	.write = msm_pcie_set_rc_sel,
};
2609
2610static ssize_t msm_pcie_set_base_sel(struct file *file,
2611 const char __user *buf,
2612 size_t count, loff_t *ppos)
2613{
2614 unsigned long ret;
2615 char str[MAX_MSG_LEN];
2616 int i;
2617 u32 new_base_sel = 0;
2618 char *base_sel_name;
Tony Truongfdbd5672017-01-06 16:23:14 -08002619 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002620
Tony Truongfdbd5672017-01-06 16:23:14 -08002621 memset(str, 0, size);
2622 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002623 if (ret)
2624 return -EFAULT;
2625
Tony Truongfdbd5672017-01-06 16:23:14 -08002626 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002627 new_base_sel = (new_base_sel * 10) + (str[i] - '0');
2628
2629 if (!new_base_sel || new_base_sel > 5) {
2630 pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
2631 new_base_sel);
2632 pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
2633 } else {
2634 base_sel = new_base_sel;
2635 pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
2636 }
2637
2638 switch (base_sel) {
2639 case 1:
2640 base_sel_name = "PARF";
2641 break;
2642 case 2:
2643 base_sel_name = "PHY";
2644 break;
2645 case 3:
2646 base_sel_name = "RC CONFIG SPACE";
2647 break;
2648 case 4:
2649 base_sel_name = "ELBI";
2650 break;
2651 case 5:
2652 base_sel_name = "EP CONFIG SPACE";
2653 break;
2654 default:
2655 base_sel_name = "INVALID";
2656 break;
2657 }
2658
2659 pr_alert("%s\n", base_sel_name);
2660
2661 return count;
2662}
2663
/* file_operations wiring for the "base_sel" debugfs node (write-only) */
const struct file_operations msm_pcie_base_sel_ops = {
	.write = msm_pcie_set_base_sel,
};
2667
2668static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
2669 const char __user *buf,
2670 size_t count, loff_t *ppos)
2671{
2672 unsigned long ret;
2673 char str[MAX_MSG_LEN];
2674 u32 new_linkdown_panic = 0;
2675 int i;
2676
2677 memset(str, 0, sizeof(str));
2678 ret = copy_from_user(str, buf, sizeof(str));
2679 if (ret)
2680 return -EFAULT;
2681
2682 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2683 new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
2684
2685 if (new_linkdown_panic <= 1) {
2686 for (i = 0; i < MAX_RC_NUM; i++) {
2687 if (!rc_sel) {
2688 msm_pcie_dev[0].linkdown_panic =
2689 new_linkdown_panic;
2690 PCIE_DBG_FS(&msm_pcie_dev[0],
2691 "PCIe: RC0: linkdown_panic is now %d\n",
2692 msm_pcie_dev[0].linkdown_panic);
2693 break;
2694 } else if (rc_sel & (1 << i)) {
2695 msm_pcie_dev[i].linkdown_panic =
2696 new_linkdown_panic;
2697 PCIE_DBG_FS(&msm_pcie_dev[i],
2698 "PCIe: RC%d: linkdown_panic is now %d\n",
2699 i, msm_pcie_dev[i].linkdown_panic);
2700 }
2701 }
2702 } else {
2703 pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
2704 new_linkdown_panic);
2705 }
2706
2707 return count;
2708}
2709
/* file_operations wiring for the "linkdown_panic" debugfs node (write-only) */
const struct file_operations msm_pcie_linkdown_panic_ops = {
	.write = msm_pcie_set_linkdown_panic,
};
2713
2714static ssize_t msm_pcie_set_wr_offset(struct file *file,
2715 const char __user *buf,
2716 size_t count, loff_t *ppos)
2717{
2718 unsigned long ret;
2719 char str[MAX_MSG_LEN];
2720 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002721 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002722
Tony Truongfdbd5672017-01-06 16:23:14 -08002723 memset(str, 0, size);
2724 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002725 if (ret)
2726 return -EFAULT;
2727
2728 wr_offset = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002729 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002730 wr_offset = (wr_offset * 10) + (str[i] - '0');
2731
2732 pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
2733
2734 return count;
2735}
2736
/* file_operations wiring for the "wr_offset" debugfs node (write-only) */
const struct file_operations msm_pcie_wr_offset_ops = {
	.write = msm_pcie_set_wr_offset,
};
2740
2741static ssize_t msm_pcie_set_wr_mask(struct file *file,
2742 const char __user *buf,
2743 size_t count, loff_t *ppos)
2744{
2745 unsigned long ret;
2746 char str[MAX_MSG_LEN];
2747 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002748 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002749
Tony Truongfdbd5672017-01-06 16:23:14 -08002750 memset(str, 0, size);
2751 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002752 if (ret)
2753 return -EFAULT;
2754
2755 wr_mask = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002756 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002757 wr_mask = (wr_mask * 10) + (str[i] - '0');
2758
2759 pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
2760
2761 return count;
2762}
2763
/* file_operations wiring for the "wr_mask" debugfs node (write-only) */
const struct file_operations msm_pcie_wr_mask_ops = {
	.write = msm_pcie_set_wr_mask,
};
2767static ssize_t msm_pcie_set_wr_value(struct file *file,
2768 const char __user *buf,
2769 size_t count, loff_t *ppos)
2770{
2771 unsigned long ret;
2772 char str[MAX_MSG_LEN];
2773 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002774 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002775
Tony Truongfdbd5672017-01-06 16:23:14 -08002776 memset(str, 0, size);
2777 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002778 if (ret)
2779 return -EFAULT;
2780
2781 wr_value = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002782 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002783 wr_value = (wr_value * 10) + (str[i] - '0');
2784
2785 pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
2786
2787 return count;
2788}
2789
/* file_operations wiring for the "wr_value" debugfs node (write-only) */
const struct file_operations msm_pcie_wr_value_ops = {
	.write = msm_pcie_set_wr_value,
};
2793
2794static ssize_t msm_pcie_set_ep_wakeirq(struct file *file,
2795 const char __user *buf,
2796 size_t count, loff_t *ppos)
2797{
2798 unsigned long ret;
2799 char str[MAX_MSG_LEN];
2800 u32 new_ep_wakeirq = 0;
2801 int i;
2802
2803 memset(str, 0, sizeof(str));
2804 ret = copy_from_user(str, buf, sizeof(str));
2805 if (ret)
2806 return -EFAULT;
2807
2808 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2809 new_ep_wakeirq = (new_ep_wakeirq * 10) + (str[i] - '0');
2810
2811 if (new_ep_wakeirq <= 1) {
2812 for (i = 0; i < MAX_RC_NUM; i++) {
2813 if (!rc_sel) {
2814 msm_pcie_dev[0].ep_wakeirq = new_ep_wakeirq;
2815 PCIE_DBG_FS(&msm_pcie_dev[0],
2816 "PCIe: RC0: ep_wakeirq is now %d\n",
2817 msm_pcie_dev[0].ep_wakeirq);
2818 break;
2819 } else if (rc_sel & (1 << i)) {
2820 msm_pcie_dev[i].ep_wakeirq = new_ep_wakeirq;
2821 PCIE_DBG_FS(&msm_pcie_dev[i],
2822 "PCIe: RC%d: ep_wakeirq is now %d\n",
2823 i, msm_pcie_dev[i].ep_wakeirq);
2824 }
2825 }
2826 } else {
2827 pr_err("PCIe: Invalid input for ep_wakeirq: %d. Please enter 0 or 1.\n",
2828 new_ep_wakeirq);
2829 }
2830
2831 return count;
2832}
2833
/* file_operations wiring for the "ep_wakeirq" debugfs node (write-only) */
const struct file_operations msm_pcie_ep_wakeirq_ops = {
	.write = msm_pcie_set_ep_wakeirq,
};
2837
2838static ssize_t msm_pcie_set_aer_enable(struct file *file,
2839 const char __user *buf,
2840 size_t count, loff_t *ppos)
2841{
2842 unsigned long ret;
2843 char str[MAX_MSG_LEN];
2844 u32 new_aer_enable = 0;
2845 u32 temp_rc_sel;
2846 int i;
2847
2848 memset(str, 0, sizeof(str));
2849 ret = copy_from_user(str, buf, sizeof(str));
2850 if (ret)
2851 return -EFAULT;
2852
2853 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2854 new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
2855
2856 if (new_aer_enable > 1) {
2857 pr_err(
2858 "PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
2859 new_aer_enable);
2860 return count;
2861 }
2862
2863 if (rc_sel)
2864 temp_rc_sel = rc_sel;
2865 else
2866 temp_rc_sel = 0x1;
2867
2868 for (i = 0; i < MAX_RC_NUM; i++) {
2869 if (temp_rc_sel & (1 << i)) {
2870 msm_pcie_dev[i].aer_enable = new_aer_enable;
2871 PCIE_DBG_FS(&msm_pcie_dev[i],
2872 "PCIe: RC%d: aer_enable is now %d\n",
2873 i, msm_pcie_dev[i].aer_enable);
2874
2875 msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
2876 PCIE20_BRIDGE_CTRL,
2877 new_aer_enable ? 0 : BIT(16),
2878 new_aer_enable ? BIT(16) : 0);
2879
2880 PCIE_DBG_FS(&msm_pcie_dev[i],
2881 "RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
2882 readl_relaxed(msm_pcie_dev[i].dm_core +
2883 PCIE20_BRIDGE_CTRL));
2884 }
2885 }
2886
2887 return count;
2888}
2889
/* file_operations wiring for the "aer_enable" debugfs node (write-only) */
const struct file_operations msm_pcie_aer_enable_ops = {
	.write = msm_pcie_set_aer_enable,
};
2893
2894static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
2895 const char __user *buf,
2896 size_t count, loff_t *ppos)
2897{
2898 unsigned long ret;
2899 char str[MAX_MSG_LEN];
2900 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002901 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002902
Tony Truongfdbd5672017-01-06 16:23:14 -08002903 memset(str, 0, size);
2904 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002905 if (ret)
2906 return -EFAULT;
2907
2908 corr_counter_limit = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002909 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002910 corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
2911
2912 pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
2913
2914 return count;
2915}
2916
/* file_operations wiring for the "corr_counter_limit" debugfs node */
const struct file_operations msm_pcie_corr_counter_limit_ops = {
	.write = msm_pcie_set_corr_counter_limit,
};
2920
2921static void msm_pcie_debugfs_init(void)
2922{
2923 rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
2924 wr_mask = 0xffffffff;
2925
2926 dent_msm_pcie = debugfs_create_dir("pci-msm", 0);
2927 if (IS_ERR(dent_msm_pcie)) {
2928 pr_err("PCIe: fail to create the folder for debug_fs.\n");
2929 return;
2930 }
2931
2932 dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
2933 dent_msm_pcie, 0,
2934 &msm_pcie_rc_sel_ops);
2935 if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
2936 pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
2937 goto rc_sel_error;
2938 }
2939
2940 dfile_case = debugfs_create_file("case", 0664,
2941 dent_msm_pcie, 0,
2942 &msm_pcie_cmd_debug_ops);
2943 if (!dfile_case || IS_ERR(dfile_case)) {
2944 pr_err("PCIe: fail to create the file for debug_fs case.\n");
2945 goto case_error;
2946 }
2947
2948 dfile_base_sel = debugfs_create_file("base_sel", 0664,
2949 dent_msm_pcie, 0,
2950 &msm_pcie_base_sel_ops);
2951 if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
2952 pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
2953 goto base_sel_error;
2954 }
2955
2956 dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
2957 dent_msm_pcie, 0,
2958 &msm_pcie_linkdown_panic_ops);
2959 if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
2960 pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
2961 goto linkdown_panic_error;
2962 }
2963
2964 dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
2965 dent_msm_pcie, 0,
2966 &msm_pcie_wr_offset_ops);
2967 if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
2968 pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
2969 goto wr_offset_error;
2970 }
2971
2972 dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
2973 dent_msm_pcie, 0,
2974 &msm_pcie_wr_mask_ops);
2975 if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
2976 pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
2977 goto wr_mask_error;
2978 }
2979
2980 dfile_wr_value = debugfs_create_file("wr_value", 0664,
2981 dent_msm_pcie, 0,
2982 &msm_pcie_wr_value_ops);
2983 if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
2984 pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
2985 goto wr_value_error;
2986 }
2987
2988 dfile_ep_wakeirq = debugfs_create_file("ep_wakeirq", 0664,
2989 dent_msm_pcie, 0,
2990 &msm_pcie_ep_wakeirq_ops);
2991 if (!dfile_ep_wakeirq || IS_ERR(dfile_ep_wakeirq)) {
2992 pr_err("PCIe: fail to create the file for debug_fs ep_wakeirq.\n");
2993 goto ep_wakeirq_error;
2994 }
2995
2996 dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
2997 dent_msm_pcie, 0,
2998 &msm_pcie_aer_enable_ops);
2999 if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
3000 pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
3001 goto aer_enable_error;
3002 }
3003
3004 dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
3005 0664, dent_msm_pcie, 0,
3006 &msm_pcie_corr_counter_limit_ops);
3007 if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
3008 pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
3009 goto corr_counter_limit_error;
3010 }
3011 return;
3012
3013corr_counter_limit_error:
3014 debugfs_remove(dfile_aer_enable);
3015aer_enable_error:
3016 debugfs_remove(dfile_ep_wakeirq);
3017ep_wakeirq_error:
3018 debugfs_remove(dfile_wr_value);
3019wr_value_error:
3020 debugfs_remove(dfile_wr_mask);
3021wr_mask_error:
3022 debugfs_remove(dfile_wr_offset);
3023wr_offset_error:
3024 debugfs_remove(dfile_linkdown_panic);
3025linkdown_panic_error:
3026 debugfs_remove(dfile_base_sel);
3027base_sel_error:
3028 debugfs_remove(dfile_case);
3029case_error:
3030 debugfs_remove(dfile_rc_sel);
3031rc_sel_error:
3032 debugfs_remove(dent_msm_pcie);
3033}
3034
3035static void msm_pcie_debugfs_exit(void)
3036{
3037 debugfs_remove(dfile_rc_sel);
3038 debugfs_remove(dfile_case);
3039 debugfs_remove(dfile_base_sel);
3040 debugfs_remove(dfile_linkdown_panic);
3041 debugfs_remove(dfile_wr_offset);
3042 debugfs_remove(dfile_wr_mask);
3043 debugfs_remove(dfile_wr_value);
3044 debugfs_remove(dfile_ep_wakeirq);
3045 debugfs_remove(dfile_aer_enable);
3046 debugfs_remove(dfile_corr_counter_limit);
3047}
3048#else
/* no-op stub used when debugfs support is compiled out */
static void msm_pcie_debugfs_init(void)
{
}
3052
/* no-op stub used when debugfs support is compiled out */
static void msm_pcie_debugfs_exit(void)
{
}
3056#endif
3057
3058static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
3059{
3060 return readl_relaxed(dev->dm_core +
3061 PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
3062}
3063
3064/**
3065 * msm_pcie_iatu_config - configure outbound address translation region
3066 * @dev: root commpex
3067 * @nr: region number
3068 * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
3069 * @host_addr: - region start address on host
3070 * @host_end: - region end address (low 32 bit) on host,
3071 * upper 32 bits are same as for @host_addr
3072 * @target_addr: - region start address on target
3073 */
3074static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
3075 unsigned long host_addr, u32 host_end,
3076 unsigned long target_addr)
3077{
3078 void __iomem *pcie20 = dev->dm_core;
3079
3080 if (dev->shadow_en) {
3081 dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
3082 nr;
3083 dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
3084 type;
3085 dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
3086 lower_32_bits(host_addr);
3087 dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
3088 upper_32_bits(host_addr);
3089 dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
3090 host_end;
3091 dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
3092 lower_32_bits(target_addr);
3093 dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
3094 upper_32_bits(target_addr);
3095 dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
3096 BIT(31);
3097 }
3098
3099 /* select region */
3100 writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
3101 /* ensure that hardware locks it */
3102 wmb();
3103
3104 /* switch off region before changing it */
3105 writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
3106 /* and wait till it propagates to the hardware */
3107 wmb();
3108
3109 writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
3110 writel_relaxed(lower_32_bits(host_addr),
3111 pcie20 + PCIE20_PLR_IATU_LBAR);
3112 writel_relaxed(upper_32_bits(host_addr),
3113 pcie20 + PCIE20_PLR_IATU_UBAR);
3114 writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
3115 writel_relaxed(lower_32_bits(target_addr),
3116 pcie20 + PCIE20_PLR_IATU_LTAR);
3117 writel_relaxed(upper_32_bits(target_addr),
3118 pcie20 + PCIE20_PLR_IATU_UTAR);
3119 /* ensure that changes propagated to the hardware */
3120 wmb();
3121 writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);
3122
3123 /* ensure that changes propagated to the hardware */
3124 wmb();
3125
3126 if (dev->enumerated) {
3127 PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
3128 dev->pcidev_table[nr].bdf >> 24,
3129 dev->pcidev_table[nr].bdf >> 19 & 0x1f,
3130 dev->pcidev_table[nr].bdf >> 16 & 0x07);
3131 PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
3132 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
3133 PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
3134 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
3135 PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
3136 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
3137 PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
3138 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
3139 PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
3140 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
3141 PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
3142 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
3143 PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
3144 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
3145 PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
3146 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
3147 }
3148}
3149
3150/**
3151 * msm_pcie_cfg_bdf - configure for config access
3152 * @dev: root commpex
3153 * @bus: PCI bus number
3154 * @devfn: PCI dev and function number
3155 *
3156 * Remap if required region 0 for config access of proper type
3157 * (CFG0 for bus 1, CFG1 for other buses)
3158 * Cache current device bdf for speed-up
3159 */
3160static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
3161{
3162 struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
3163 u32 bdf = BDF_OFFSET(bus, devfn);
3164 u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3165
3166 if (dev->current_bdf == bdf)
3167 return;
3168
3169 msm_pcie_iatu_config(dev, 0, type,
3170 axi_conf->start,
3171 axi_conf->start + SZ_4K - 1,
3172 bdf);
3173
3174 dev->current_bdf = bdf;
3175}
3176
/*
 * msm_pcie_save_shadow - record a config-space write in the shadow tables
 * @dev: root complex the write went through
 * @word_offset: 32-bit-aligned config-space offset that was written
 * @wr_val: value written
 * @bdf: bus/device/function key of the target (see BDF_OFFSET)
 * @rc: true if the write targeted the root complex itself
 *
 * RC writes go to rc_shadow; endpoint writes go to the per-device
 * ep_shadow slot. An endpoint seen for the first time is registered on
 * the fly: it claims the first free entry in dev->pcidev_table and the
 * first free slot in the global msm_pcie_dev_tbl. The registration and
 * the match check are deliberately interleaved so a brand-new bdf is
 * stored in the same pass.
 */
static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
					u32 word_offset, u32 wr_val,
					u32 bdf, bool rc)
{
	int i, j;
	u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;

	if (rc) {
		dev->rc_shadow[word_offset / 4] = wr_val;
	} else {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			/* free slot: register this bdf before matching below */
			if (!dev->pcidev_table[i].bdf) {
				for (j = 0; j < max_dev; j++)
					if (!msm_pcie_dev_tbl[j].bdf) {
						msm_pcie_dev_tbl[j].bdf = bdf;
						break;
					}
				dev->pcidev_table[i].bdf = bdf;
				/* a second device implies a switch/bridge */
				if ((!dev->bridge_found) && (i > 0))
					dev->bridge_found = true;
			}
			if (dev->pcidev_table[i].bdf == bdf) {
				dev->ep_shadow[i][word_offset / 4] = wr_val;
				break;
			}
		}
	}
}
3205
3206static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
3207 int where, int size, u32 *val)
3208{
3209 uint32_t word_offset, byte_offset, mask;
3210 uint32_t rd_val, wr_val;
3211 struct msm_pcie_dev_t *dev;
3212 void __iomem *config_base;
3213 bool rc = false;
3214 u32 rc_idx;
3215 int rv = 0;
3216 u32 bdf = BDF_OFFSET(bus->number, devfn);
3217 int i;
3218
3219 dev = PCIE_BUS_PRIV_DATA(bus);
3220
3221 if (!dev) {
3222 pr_err("PCIe: No device found for this bus.\n");
3223 *val = ~0;
3224 rv = PCIBIOS_DEVICE_NOT_FOUND;
3225 goto out;
3226 }
3227
3228 rc_idx = dev->rc_idx;
3229 rc = (bus->number == 0);
3230
3231 spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);
3232
3233 if (!dev->cfg_access) {
3234 PCIE_DBG3(dev,
3235 "Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
3236 rc_idx, bus->number, devfn, where, size);
3237 *val = ~0;
3238 rv = PCIBIOS_DEVICE_NOT_FOUND;
3239 goto unlock;
3240 }
3241
3242 if (rc && (devfn != 0)) {
3243 PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
3244 (oper == RD) ? "rd" : "wr", bus->number, devfn);
3245 *val = ~0;
3246 rv = PCIBIOS_DEVICE_NOT_FOUND;
3247 goto unlock;
3248 }
3249
3250 if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
3251 PCIE_DBG3(dev,
3252 "Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
3253 rc_idx, bus->number, devfn, where, size);
3254 *val = ~0;
3255 rv = PCIBIOS_DEVICE_NOT_FOUND;
3256 goto unlock;
3257 }
3258
3259 /* check if the link is up for endpoint */
3260 if (!rc && !msm_pcie_is_link_up(dev)) {
3261 PCIE_ERR(dev,
3262 "PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
3263 rc_idx, (oper == RD) ? "rd" : "wr",
3264 bus->number, devfn);
3265 *val = ~0;
3266 rv = PCIBIOS_DEVICE_NOT_FOUND;
3267 goto unlock;
3268 }
3269
3270 if (!rc && !dev->enumerated)
3271 msm_pcie_cfg_bdf(dev, bus->number, devfn);
3272
3273 word_offset = where & ~0x3;
3274 byte_offset = where & 0x3;
3275 mask = (~0 >> (8 * (4 - size))) << (8 * byte_offset);
3276
3277 if (rc || !dev->enumerated) {
3278 config_base = rc ? dev->dm_core : dev->conf;
3279 } else {
3280 for (i = 0; i < MAX_DEVICE_NUM; i++) {
3281 if (dev->pcidev_table[i].bdf == bdf) {
3282 config_base = dev->pcidev_table[i].conf_base;
3283 break;
3284 }
3285 }
3286 if (i == MAX_DEVICE_NUM) {
3287 *val = ~0;
3288 rv = PCIBIOS_DEVICE_NOT_FOUND;
3289 goto unlock;
3290 }
3291 }
3292
3293 rd_val = readl_relaxed(config_base + word_offset);
3294
3295 if (oper == RD) {
3296 *val = ((rd_val & mask) >> (8 * byte_offset));
3297 PCIE_DBG3(dev,
3298 "RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
3299 rc_idx, bus->number, devfn, where, size, *val, rd_val);
3300 } else {
3301 wr_val = (rd_val & ~mask) |
3302 ((*val << (8 * byte_offset)) & mask);
3303
3304 if ((bus->number == 0) && (where == 0x3c))
3305 wr_val = wr_val | (3 << 16);
3306
3307 writel_relaxed(wr_val, config_base + word_offset);
3308 wmb(); /* ensure config data is written to hardware register */
3309
3310 if (rd_val == PCIE_LINK_DOWN)
3311 PCIE_ERR(dev,
3312 "Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
3313 rc_idx, bus->number, devfn, where, size);
3314 else if (dev->shadow_en)
3315 msm_pcie_save_shadow(dev, word_offset, wr_val, bdf, rc);
3316
3317 PCIE_DBG3(dev,
3318 "RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
3319 rc_idx, bus->number, devfn, where, size,
3320 wr_val, rd_val, *val);
3321 }
3322
3323unlock:
3324 spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
3325out:
3326 return rv;
3327}
3328
3329static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
3330 int size, u32 *val)
3331{
3332 int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
3333
3334 if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
3335 *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
3336 PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
3337 }
3338
3339 return ret;
3340}
3341
3342static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
3343 int where, int size, u32 val)
3344{
3345 return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
3346}
3347
/* config-space accessors registered with the PCI core for this RC */
static struct pci_ops msm_pcie_ops = {
	.read = msm_pcie_rd_conf,
	.write = msm_pcie_wr_conf,
};
3352
3353static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
3354{
3355 int rc = 0, i;
3356 struct msm_pcie_gpio_info_t *info;
3357
3358 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3359
3360 for (i = 0; i < dev->gpio_n; i++) {
3361 info = &dev->gpio[i];
3362
3363 if (!info->num)
3364 continue;
3365
3366 rc = gpio_request(info->num, info->name);
3367 if (rc) {
3368 PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
3369 dev->rc_idx, info->name, rc);
3370 break;
3371 }
3372
3373 if (info->out)
3374 rc = gpio_direction_output(info->num, info->init);
3375 else
3376 rc = gpio_direction_input(info->num);
3377 if (rc) {
3378 PCIE_ERR(dev,
3379 "PCIe: RC%d can't set direction for GPIO %s:%d\n",
3380 dev->rc_idx, info->name, rc);
3381 gpio_free(info->num);
3382 break;
3383 }
3384 }
3385
3386 if (rc)
3387 while (i--)
3388 gpio_free(dev->gpio[i].num);
3389
3390 return rc;
3391}
3392
3393static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
3394{
3395 int i;
3396
3397 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3398
3399 for (i = 0; i < dev->gpio_n; i++)
3400 gpio_free(dev->gpio[i].num);
3401}
3402
/*
 * msm_pcie_vreg_init - enable every regulator described for this RC.
 *
 * For each populated vreg entry: optionally set the voltage window
 * (when max_v is given), optionally set the load (when opt_mode is
 * given), then enable it. On any failure the loop breaks and every
 * regulator enabled so far is disabled again; for "vreg-cx" the corner
 * vote is also dropped. Returns 0 on success or the failing call's
 * error code.
 */
int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct regulator *vreg;
	struct msm_pcie_vreg_info_t *info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		info = &dev->vreg[i];
		vreg = info->hdl;

		/* optional supply not present on this platform */
		if (!vreg)
			continue;

		PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
			dev->rc_idx, info->name);
		if (info->max_v) {
			rc = regulator_set_voltage(vreg,
						info->min_v, info->max_v);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set voltage for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		if (info->opt_mode) {
			/* only negative is an error for regulator_set_load */
			rc = regulator_set_load(vreg, info->opt_mode);
			if (rc < 0) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set mode for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		rc = regulator_enable(vreg);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe: RC%d can't enable regulator %s: %d\n",
				dev->rc_idx, info->name, rc);
			break;
		}
	}

	/* unwind: i indexes the failed entry, so free entries [0, i) */
	if (rc)
		while (i--) {
			struct regulator *hdl = dev->vreg[i].hdl;

			if (hdl) {
				regulator_disable(hdl);
				if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
					PCIE_DBG(dev,
						"RC%d: Removing %s vote.\n",
						dev->rc_idx,
						dev->vreg[i].name);
					regulator_set_voltage(hdl,
						RPM_REGULATOR_CORNER_NONE,
						INT_MAX);
				}
			}

		}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
3473
3474static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
3475{
3476 int i;
3477
3478 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3479
3480 for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
3481 if (dev->vreg[i].hdl) {
3482 PCIE_DBG(dev, "Vreg %s is being disabled\n",
3483 dev->vreg[i].name);
3484 regulator_disable(dev->vreg[i].hdl);
3485
3486 if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
3487 PCIE_DBG(dev,
3488 "RC%d: Removing %s vote.\n",
3489 dev->rc_idx,
3490 dev->vreg[i].name);
3491 regulator_set_voltage(dev->vreg[i].hdl,
3492 RPM_REGULATOR_CORNER_NONE,
3493 INT_MAX);
3494 }
3495 }
3496 }
3497
3498 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3499}
3500
3501static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
3502{
3503 int i, rc = 0;
3504 struct msm_pcie_clk_info_t *info;
3505 struct msm_pcie_reset_info_t *reset_info;
3506
3507 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3508
3509 rc = regulator_enable(dev->gdsc);
3510
3511 if (rc) {
3512 PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
3513 dev->rc_idx, dev->pdev->name);
3514 return rc;
3515 }
3516
3517 if (dev->gdsc_smmu) {
3518 rc = regulator_enable(dev->gdsc_smmu);
3519
3520 if (rc) {
3521 PCIE_ERR(dev,
3522 "PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
3523 dev->rc_idx, dev->pdev->name);
3524 return rc;
3525 }
3526 }
3527
3528 PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
3529 if (dev->bus_client) {
3530 rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
3531 if (rc) {
3532 PCIE_ERR(dev,
3533 "PCIe: fail to set bus bandwidth for RC%d:%d.\n",
3534 dev->rc_idx, rc);
3535 return rc;
3536 }
3537
3538 PCIE_DBG2(dev,
3539 "PCIe: set bus bandwidth for RC%d.\n",
3540 dev->rc_idx);
3541 }
3542
3543 for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
3544 info = &dev->clk[i];
3545
3546 if (!info->hdl)
3547 continue;
3548
3549 if (info->config_mem)
3550 msm_pcie_config_clock_mem(dev, info);
3551
3552 if (info->freq) {
3553 rc = clk_set_rate(info->hdl, info->freq);
3554 if (rc) {
3555 PCIE_ERR(dev,
3556 "PCIe: RC%d can't set rate for clk %s: %d.\n",
3557 dev->rc_idx, info->name, rc);
3558 break;
3559 }
3560
3561 PCIE_DBG2(dev,
3562 "PCIe: RC%d set rate for clk %s.\n",
3563 dev->rc_idx, info->name);
3564 }
3565
3566 rc = clk_prepare_enable(info->hdl);
3567
3568 if (rc)
3569 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
3570 dev->rc_idx, info->name);
3571 else
3572 PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
3573 info->name, dev->rc_idx);
3574 }
3575
3576 if (rc) {
3577 PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
3578 dev->rc_idx);
3579 while (i--) {
3580 struct clk *hdl = dev->clk[i].hdl;
3581
3582 if (hdl)
3583 clk_disable_unprepare(hdl);
3584 }
3585
3586 if (dev->gdsc_smmu)
3587 regulator_disable(dev->gdsc_smmu);
3588
3589 regulator_disable(dev->gdsc);
3590 }
3591
3592 for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
3593 reset_info = &dev->reset[i];
3594 if (reset_info->hdl) {
3595 rc = reset_control_deassert(reset_info->hdl);
3596 if (rc)
3597 PCIE_ERR(dev,
3598 "PCIe: RC%d failed to deassert reset for %s.\n",
3599 dev->rc_idx, reset_info->name);
3600 else
3601 PCIE_DBG2(dev,
3602 "PCIe: RC%d successfully deasserted reset for %s.\n",
3603 dev->rc_idx, reset_info->name);
3604 }
3605 }
3606
3607 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3608
3609 return rc;
3610}
3611
3612static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
3613{
3614 int i;
3615 int rc;
3616
3617 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3618
3619 for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
3620 if (dev->clk[i].hdl)
3621 clk_disable_unprepare(dev->clk[i].hdl);
3622
3623 if (dev->bus_client) {
3624 PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
3625 dev->rc_idx);
3626
3627 rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
3628 if (rc)
3629 PCIE_ERR(dev,
3630 "PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
3631 dev->rc_idx, rc);
3632 else
3633 PCIE_DBG(dev,
3634 "PCIe: relinquish bus bandwidth for RC%d.\n",
3635 dev->rc_idx);
3636 }
3637
3638 if (dev->gdsc_smmu)
3639 regulator_disable(dev->gdsc_smmu);
3640
3641 regulator_disable(dev->gdsc);
3642
3643 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3644}
3645
3646static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
3647{
3648 int i, rc = 0;
3649 struct msm_pcie_clk_info_t *info;
3650 struct msm_pcie_reset_info_t *pipe_reset_info;
3651
3652 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3653
3654 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
3655 info = &dev->pipeclk[i];
3656
3657 if (!info->hdl)
3658 continue;
3659
3660
3661 if (info->config_mem)
3662 msm_pcie_config_clock_mem(dev, info);
3663
3664 if (info->freq) {
3665 rc = clk_set_rate(info->hdl, info->freq);
3666 if (rc) {
3667 PCIE_ERR(dev,
3668 "PCIe: RC%d can't set rate for clk %s: %d.\n",
3669 dev->rc_idx, info->name, rc);
3670 break;
3671 }
3672
3673 PCIE_DBG2(dev,
3674 "PCIe: RC%d set rate for clk %s: %d.\n",
3675 dev->rc_idx, info->name, rc);
3676 }
3677
3678 rc = clk_prepare_enable(info->hdl);
3679
3680 if (rc)
3681 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
3682 dev->rc_idx, info->name);
3683 else
3684 PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
3685 dev->rc_idx, info->name);
3686 }
3687
3688 if (rc) {
3689 PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
3690 dev->rc_idx);
3691 while (i--)
3692 if (dev->pipeclk[i].hdl)
3693 clk_disable_unprepare(dev->pipeclk[i].hdl);
3694 }
3695
3696 for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
3697 pipe_reset_info = &dev->pipe_reset[i];
3698 if (pipe_reset_info->hdl) {
3699 rc = reset_control_deassert(
3700 pipe_reset_info->hdl);
3701 if (rc)
3702 PCIE_ERR(dev,
3703 "PCIe: RC%d failed to deassert pipe reset for %s.\n",
3704 dev->rc_idx, pipe_reset_info->name);
3705 else
3706 PCIE_DBG2(dev,
3707 "PCIe: RC%d successfully deasserted pipe reset for %s.\n",
3708 dev->rc_idx, pipe_reset_info->name);
3709 }
3710 }
3711
3712 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3713
3714 return rc;
3715}
3716
3717static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
3718{
3719 int i;
3720
3721 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3722
3723 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
3724 if (dev->pipeclk[i].hdl)
3725 clk_disable_unprepare(
3726 dev->pipeclk[i].hdl);
3727
3728 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3729}
3730
3731static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
3732{
3733 int i;
3734 u8 type;
3735 struct msm_pcie_device_info *dev_table = dev->pcidev_table;
3736
3737 for (i = 0; i < MAX_DEVICE_NUM; i++) {
3738 if (!dev_table[i].bdf)
3739 break;
3740
3741 type = dev_table[i].bdf >> 24 == 0x1 ?
3742 PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3743
3744 msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
3745 dev_table[i].phy_address + SZ_4K - 1,
3746 dev_table[i].bdf);
3747 }
3748}
3749
/*
 * msm_pcie_config_controller - program the controller after link-up:
 * iATU config window(s), N_FTS, AUX clock frequency, completion
 * timeout, AER enables, and (when an SMMU is present) the PARF BDF
 * translation registers.
 */
static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
{
	int i;

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
	 * axi config address range to device config address range
	 */
	if (dev->enumerated) {
		msm_pcie_iatu_config_all_ep(dev);
	} else {
		dev->current_bdf = 0; /* to force IATU re-config */
		msm_pcie_cfg_bdf(dev, 1, 0);
	}

	/* configure N_FTS: BIT(15) requests common-clock N_FTS when unset */
	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
	if (!dev->n_fts)
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
			0, BIT(15));
	else
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
			PCIE20_ACK_N_FTS,
			dev->n_fts << 8);

	/* shadow the result so link recovery can restore it */
	if (dev->shadow_en)
		dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
			readl_relaxed(dev->dm_core +
			PCIE20_ACK_F_ASPM_CTRL_REG);

	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));

	/* configure AUX clock frequency register for PCIe core */
	if (dev->use_19p2mhz_aux_clk)
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
	else
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);

	/* configure the completion timeout value for PCIe core */
	if (dev->cpl_timeout && dev->bridge_found)
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0xf, dev->cpl_timeout);

	/* Enable AER on RC */
	if (dev->aer_enable) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
				BIT(16)|BIT(17));
		msm_pcie_write_mask(dev->dm_core +  PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(3)|BIT(2)|BIT(1)|BIT(0));

		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
	}

	/* configure SMMU registers */
	if (dev->smmu_exist) {
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_SID_OFFSET, 0);

		if (dev->enumerated) {
			/* one translate entry per device with a short bdf */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				if (dev->pcidev_table[i].dev &&
					dev->pcidev_table[i].short_bdf) {
					msm_pcie_write_reg(dev->parf,
						PCIE20_PARF_BDF_TRANSLATE_N +
						dev->pcidev_table[i].short_bdf
						* 4,
						dev->pcidev_table[i].bdf >> 16);
				}
			}
		}
	}
}
3831
/*
 * msm_pcie_config_link_state - negotiate/enable link power features
 * (common clock, clock power management, L0s, L1, L1 substates) on both
 * the RC and the endpoint, per the dev->*_supported/_en flags.
 *
 * The endpoint's PCIe capability block is located by walking the
 * standard capability list in its config space; the L1SS registers are
 * found by walking the extended capability list. Everything written is
 * mirrored into the shadow arrays when shadow_en is set so it can be
 * replayed after link recovery.
 */
static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
{
	u32 val;
	u32 current_offset;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_cap_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	/* Enable the AUX Clock and the Core Clk to be synchronous for L1SS*/
	if (!dev->aux_clk_sync && dev->l1ss_supported)
		msm_pcie_write_mask(dev->parf +
				PCIE20_PARF_SYS_CTRL, BIT(3), 0);

	/* walk the EP's capability list for the PCIe capability (ID 0x10) */
	current_offset = readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

	while (current_offset) {
		if (msm_pcie_check_align(dev, current_offset))
			return;

		val = readl_relaxed(dev->conf + current_offset);
		if ((val & 0xff) == PCIE20_CAP_ID) {
			/* fixed offsets within the PCIe capability block */
			ep_link_cap_offset = current_offset + 0x0c;
			ep_link_ctrlstts_offset = current_offset + 0x10;
			ep_dev_ctrl2stts2_offset = current_offset + 0x28;
			break;
		}
		current_offset = (val >> 8) & 0xff;
	}

	if (!ep_link_cap_offset) {
		PCIE_DBG(dev,
			"RC%d endpoint does not support PCIe capability registers\n",
			dev->rc_idx);
		return;
	}

	PCIE_DBG(dev,
		"RC%d: ep_link_cap_offset: 0x%x\n",
		dev->rc_idx, ep_link_cap_offset);

	/* BIT(6) of link control: common clock configuration */
	if (dev->common_clk_en) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(6));

		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(6));

		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);

			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}

		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* BIT(18) of link cap advertises clock PM; BIT(8) of ctrl enables */
	if (dev->clk_power_manage_en) {
		val = readl_relaxed(dev->conf + ep_link_cap_offset);
		if (val & BIT(18)) {
			msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
						0, BIT(8));

			if (dev->shadow_en)
				dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
					readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);

			PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset));
		}
	}

	/* BIT(0) of link control: ASPM L0s entry enable on both sides */
	if (dev->l0s_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(0));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
						readl_relaxed(dev->dm_core +
						PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
						readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* BIT(1) of link control: ASPM L1 entry enable on both sides */
	if (dev->l1_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(1));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
						readl_relaxed(dev->dm_core +
						PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
						readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	if (dev->l1ss_supported) {
		/* walk the extended capability list for L1SS */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			if (msm_pcie_check_align(dev, current_offset))
				return;

			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset = current_offset + 0x4;
				ep_l1sub_ctrl1_offset = current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG(dev,
				"RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			return;
		}

		val = readl_relaxed(dev->conf + ep_l1sub_cap_reg1_offset);

		PCIE_DBG2(dev, "EP's L1SUB_CAPABILITY_REG_1: 0x%x\n", val);
		PCIE_DBG2(dev, "RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
				dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* enable only the substates the EP advertises (low 4 bits) */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core, PCIE20_L1SUB_CONTROL1,
					0xf, val);
		/* BIT(10): LTR mechanism enable on both sides */
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0, BIT(10));
		msm_pcie_write_reg_field(dev->conf, ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf + ep_dev_ctrl2stts2_offset,
					0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
					readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
					readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
					readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
					readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG2(dev, "RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1));
		PCIE_DBG2(dev, "RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG2(dev, "EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf + ep_l1sub_ctrl1_offset));
		PCIE_DBG2(dev, "EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
			ep_dev_ctrl2stts2_offset));
	}
}
4019
4020void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
4021{
4022 int i;
4023
4024 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4025
4026 /* program MSI controller and enable all interrupts */
4027 writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
4028 writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
4029
4030 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
4031 writel_relaxed(~0, dev->dm_core +
4032 PCIE20_MSI_CTRL_INTR_EN + (i * 12));
4033
4034 /* ensure that hardware is configured before proceeding */
4035 wmb();
4036}
4037
4038static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
4039 struct platform_device *pdev)
4040{
4041 int i, len, cnt, ret = 0, size = 0;
4042 struct msm_pcie_vreg_info_t *vreg_info;
4043 struct msm_pcie_gpio_info_t *gpio_info;
4044 struct msm_pcie_clk_info_t *clk_info;
4045 struct resource *res;
4046 struct msm_pcie_res_info_t *res_info;
4047 struct msm_pcie_irq_info_t *irq_info;
4048 struct msm_pcie_irq_info_t *msi_info;
4049 struct msm_pcie_reset_info_t *reset_info;
4050 struct msm_pcie_reset_info_t *pipe_reset_info;
4051 char prop_name[MAX_PROP_SIZE];
4052 const __be32 *prop;
4053 u32 *clkfreq = NULL;
4054
4055 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
4056
4057 cnt = of_property_count_strings((&pdev->dev)->of_node,
4058 "clock-names");
4059 if (cnt > 0) {
4060 clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
4061 sizeof(*clkfreq), GFP_KERNEL);
4062 if (!clkfreq) {
4063 PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
4064 dev->rc_idx);
4065 return -ENOMEM;
4066 }
4067 ret = of_property_read_u32_array(
4068 (&pdev->dev)->of_node,
4069 "max-clock-frequency-hz", clkfreq, cnt);
4070 if (ret) {
4071 PCIE_ERR(dev,
4072 "PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
4073 dev->rc_idx, ret);
4074 goto out;
4075 }
4076 }
4077
4078 for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
4079 vreg_info = &dev->vreg[i];
4080 vreg_info->hdl =
4081 devm_regulator_get(&pdev->dev, vreg_info->name);
4082
4083 if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
4084 PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
4085 vreg_info->name);
4086 ret = PTR_ERR(vreg_info->hdl);
4087 goto out;
4088 }
4089
4090 if (IS_ERR(vreg_info->hdl)) {
4091 if (vreg_info->required) {
4092 PCIE_DBG(dev, "Vreg %s doesn't exist\n",
4093 vreg_info->name);
4094 ret = PTR_ERR(vreg_info->hdl);
4095 goto out;
4096 } else {
4097 PCIE_DBG(dev,
4098 "Optional Vreg %s doesn't exist\n",
4099 vreg_info->name);
4100 vreg_info->hdl = NULL;
4101 }
4102 } else {
4103 dev->vreg_n++;
4104 snprintf(prop_name, MAX_PROP_SIZE,
4105 "qcom,%s-voltage-level", vreg_info->name);
4106 prop = of_get_property((&pdev->dev)->of_node,
4107 prop_name, &len);
4108 if (!prop || (len != (3 * sizeof(__be32)))) {
4109 PCIE_DBG(dev, "%s %s property\n",
4110 prop ? "invalid format" :
4111 "no", prop_name);
4112 } else {
4113 vreg_info->max_v = be32_to_cpup(&prop[0]);
4114 vreg_info->min_v = be32_to_cpup(&prop[1]);
4115 vreg_info->opt_mode =
4116 be32_to_cpup(&prop[2]);
4117 }
4118 }
4119 }
4120
4121 dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");
4122
4123 if (IS_ERR(dev->gdsc)) {
4124 PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
4125 dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
4126 if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
4127 PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
4128 dev->pdev->name);
4129 ret = PTR_ERR(dev->gdsc);
4130 goto out;
4131 }
4132
4133 dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");
4134
4135 if (IS_ERR(dev->gdsc_smmu)) {
4136 PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
4137 dev->rc_idx);
4138 dev->gdsc_smmu = NULL;
4139 }
4140
4141 dev->gpio_n = 0;
4142 for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
4143 gpio_info = &dev->gpio[i];
4144 ret = of_get_named_gpio((&pdev->dev)->of_node,
4145 gpio_info->name, 0);
4146 if (ret >= 0) {
4147 gpio_info->num = ret;
4148 dev->gpio_n++;
4149 PCIE_DBG(dev, "GPIO num for %s is %d\n",
4150 gpio_info->name, gpio_info->num);
4151 } else {
4152 if (gpio_info->required) {
4153 PCIE_ERR(dev,
4154 "Could not get required GPIO %s\n",
4155 gpio_info->name);
4156 goto out;
4157 } else {
4158 PCIE_DBG(dev,
4159 "Could not get optional GPIO %s\n",
4160 gpio_info->name);
4161 }
4162 }
4163 ret = 0;
4164 }
4165
4166 of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
4167 if (size) {
4168 dev->phy_sequence = (struct msm_pcie_phy_info_t *)
4169 devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
4170
4171 if (dev->phy_sequence) {
4172 dev->phy_len =
4173 size / sizeof(*dev->phy_sequence);
4174
4175 of_property_read_u32_array(pdev->dev.of_node,
4176 "qcom,phy-sequence",
4177 (unsigned int *)dev->phy_sequence,
4178 size / sizeof(dev->phy_sequence->offset));
4179 } else {
4180 PCIE_ERR(dev,
4181 "RC%d: Could not allocate memory for phy init sequence.\n",
4182 dev->rc_idx);
4183 ret = -ENOMEM;
4184 goto out;
4185 }
4186 } else {
4187 PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
4188 dev->rc_idx);
4189 }
4190
4191 of_get_property(pdev->dev.of_node, "qcom,port-phy-sequence", &size);
4192 if (size) {
4193 dev->port_phy_sequence = (struct msm_pcie_phy_info_t *)
4194 devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
4195
4196 if (dev->port_phy_sequence) {
4197 dev->port_phy_len =
4198 size / sizeof(*dev->port_phy_sequence);
4199
4200 of_property_read_u32_array(pdev->dev.of_node,
4201 "qcom,port-phy-sequence",
4202 (unsigned int *)dev->port_phy_sequence,
4203 size / sizeof(dev->port_phy_sequence->offset));
4204 } else {
4205 PCIE_ERR(dev,
4206 "RC%d: Could not allocate memory for port phy init sequence.\n",
4207 dev->rc_idx);
4208 ret = -ENOMEM;
4209 goto out;
4210 }
4211 } else {
4212 PCIE_DBG(dev, "RC%d: port phy sequence is not present in DT\n",
4213 dev->rc_idx);
4214 }
4215
4216 for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
4217 clk_info = &dev->clk[i];
4218
4219 clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
4220
4221 if (IS_ERR(clk_info->hdl)) {
4222 if (clk_info->required) {
4223 PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
4224 clk_info->name, PTR_ERR(clk_info->hdl));
4225 ret = PTR_ERR(clk_info->hdl);
4226 goto out;
4227 } else {
4228 PCIE_DBG(dev, "Ignoring Clock %s\n",
4229 clk_info->name);
4230 clk_info->hdl = NULL;
4231 }
4232 } else {
4233 if (clkfreq != NULL) {
4234 clk_info->freq = clkfreq[i +
4235 MSM_PCIE_MAX_PIPE_CLK];
4236 PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
4237 clk_info->name, clk_info->freq);
4238 }
4239 }
4240 }
4241
4242 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
4243 clk_info = &dev->pipeclk[i];
4244
4245 clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
4246
4247 if (IS_ERR(clk_info->hdl)) {
4248 if (clk_info->required) {
4249 PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
4250 clk_info->name, PTR_ERR(clk_info->hdl));
4251 ret = PTR_ERR(clk_info->hdl);
4252 goto out;
4253 } else {
4254 PCIE_DBG(dev, "Ignoring Clock %s\n",
4255 clk_info->name);
4256 clk_info->hdl = NULL;
4257 }
4258 } else {
4259 if (clkfreq != NULL) {
4260 clk_info->freq = clkfreq[i];
4261 PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
4262 clk_info->name, clk_info->freq);
4263 }
4264 }
4265 }
4266
4267 for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
4268 reset_info = &dev->reset[i];
4269
4270 reset_info->hdl = devm_reset_control_get(&pdev->dev,
4271 reset_info->name);
4272
4273 if (IS_ERR(reset_info->hdl)) {
4274 if (reset_info->required) {
4275 PCIE_DBG(dev,
4276 "Reset %s isn't available:%ld\n",
4277 reset_info->name,
4278 PTR_ERR(reset_info->hdl));
4279
4280 ret = PTR_ERR(reset_info->hdl);
4281 reset_info->hdl = NULL;
4282 goto out;
4283 } else {
4284 PCIE_DBG(dev, "Ignoring Reset %s\n",
4285 reset_info->name);
4286 reset_info->hdl = NULL;
4287 }
4288 }
4289 }
4290
4291 for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
4292 pipe_reset_info = &dev->pipe_reset[i];
4293
4294 pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
4295 pipe_reset_info->name);
4296
4297 if (IS_ERR(pipe_reset_info->hdl)) {
4298 if (pipe_reset_info->required) {
4299 PCIE_DBG(dev,
4300 "Pipe Reset %s isn't available:%ld\n",
4301 pipe_reset_info->name,
4302 PTR_ERR(pipe_reset_info->hdl));
4303
4304 ret = PTR_ERR(pipe_reset_info->hdl);
4305 pipe_reset_info->hdl = NULL;
4306 goto out;
4307 } else {
4308 PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
4309 pipe_reset_info->name);
4310 pipe_reset_info->hdl = NULL;
4311 }
4312 }
4313 }
4314
4315 dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
4316 if (!dev->bus_scale_table) {
4317 PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
4318 dev->rc_idx, dev->pdev->name);
4319 dev->bus_client = 0;
4320 } else {
4321 dev->bus_client =
4322 msm_bus_scale_register_client(dev->bus_scale_table);
4323 if (!dev->bus_client) {
4324 PCIE_ERR(dev,
4325 "PCIe: Failed to register bus client for RC%d (%s)\n",
4326 dev->rc_idx, dev->pdev->name);
4327 msm_bus_cl_clear_pdata(dev->bus_scale_table);
4328 ret = -ENODEV;
4329 goto out;
4330 }
4331 }
4332
4333 for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
4334 res_info = &dev->res[i];
4335
4336 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4337 res_info->name);
4338
4339 if (!res) {
4340 PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
4341 dev->rc_idx, res_info->name);
4342 } else {
4343 PCIE_DBG(dev, "start addr for %s is %pa.\n",
4344 res_info->name, &res->start);
4345
4346 res_info->base = devm_ioremap(&pdev->dev,
4347 res->start, resource_size(res));
4348 if (!res_info->base) {
4349 PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
4350 dev->rc_idx, res_info->name);
4351 ret = -ENOMEM;
4352 goto out;
4353 } else {
4354 res_info->resource = res;
4355 }
4356 }
4357 }
4358
4359 for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
4360 irq_info = &dev->irq[i];
4361
4362 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
4363 irq_info->name);
4364
4365 if (!res) {
4366 PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
4367 dev->rc_idx, irq_info->name);
4368 } else {
4369 irq_info->num = res->start;
4370 PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
4371 irq_info->num);
4372 }
4373 }
4374
4375 for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
4376 msi_info = &dev->msi[i];
4377
4378 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
4379 msi_info->name);
4380
4381 if (!res) {
4382 PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
4383 dev->rc_idx, msi_info->name);
4384 } else {
4385 msi_info->num = res->start;
4386 PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
4387 msi_info->num);
4388 }
4389 }
4390
4391 /* All allocations succeeded */
4392
4393 if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
4394 dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
4395 else
4396 dev->wake_n = 0;
4397
4398 dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
4399 dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
4400 dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
4401 dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
4402 dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
4403 dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
4404 dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
4405 dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
4406 dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
4407 dev->dev_io_res->flags = IORESOURCE_IO;
4408
4409out:
4410 kfree(clkfreq);
4411
4412 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
4413
4414 return ret;
4415}
4416
4417static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
4418{
4419 dev->parf = NULL;
4420 dev->elbi = NULL;
4421 dev->dm_core = NULL;
4422 dev->conf = NULL;
4423 dev->bars = NULL;
4424 dev->tcsr = NULL;
4425 dev->dev_mem_res = NULL;
4426 dev->dev_io_res = NULL;
4427}
4428
/*
 * Bring up the PCIe link on this root complex.
 *
 * @options selects which resource classes to turn on (PM_VREG, PM_CLK,
 * PM_PIPE_CLK); callers normally pass PM_ALL.  The sequence is strictly
 * ordered: PERST asserted -> power/clocks -> controller + PHY init ->
 * pipe clock -> wait for PHY ready -> PERST released -> link training.
 * Serialized against msm_pcie_disable() by dev->setup_lock; common-PHY
 * init is refcounted across RCs via com_phy_lock/num_rc_on.
 *
 * Returns 0 on success, a negative errno (or -1 for link-training
 * failure) otherwise.
 */
int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
{
	int ret = 0;
	uint32_t val;
	long int retries = 0;
	int link_check_count = 0;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
		PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
			dev->rc_idx);
		goto out;
	}

	/* assert PCIe reset link to keep EP in reset */

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
				 PERST_PROPAGATION_DELAY_US_MAX);

	/* enable power */

	if (options & PM_VREG) {
		ret = msm_pcie_vreg_init(dev);
		if (ret)
			goto out;
	}

	/* enable clocks */
	if (options & PM_CLK) {
		ret = msm_pcie_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto clk_fail;
	}

	/* restore TrustZone/secure config after power collapse, if any */
	if (dev->scm_dev_id) {
		PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
		msm_pcie_restore_sec_config(dev);
	}

	/* enable PCIe clocks and resets */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);

	/* change DBI base address */
	writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* NOTE(review): 0x365E is an opaque SYS_CTRL value from the HW
	 * programming guide — confirm against the target's IP catalog.
	 */
	writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
				0, BIT(4));

	/* enable selected IRQ */
	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
		msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);

		msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
					BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
					BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
					BIT(MSM_PCIE_INT_EVT_AER_ERR) |
					BIT(MSM_PCIE_INT_EVT_MSI_0) |
					BIT(MSM_PCIE_INT_EVT_MSI_1) |
					BIT(MSM_PCIE_INT_EVT_MSI_2) |
					BIT(MSM_PCIE_INT_EVT_MSI_3) |
					BIT(MSM_PCIE_INT_EVT_MSI_4) |
					BIT(MSM_PCIE_INT_EVT_MSI_5) |
					BIT(MSM_PCIE_INT_EVT_MSI_6) |
					BIT(MSM_PCIE_INT_EVT_MSI_7));

		PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
	}

	/* size the slave address space to cover the BAR memory window */
	if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_16M)
		writel_relaxed(SZ_32M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_8M)
		writel_relaxed(SZ_16M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else
		writel_relaxed(SZ_8M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);

	if (dev->use_msi) {
		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
		/* keep current halt size unless DT overrode it */
		val = dev->wr_halt_size ? dev->wr_halt_size :
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
			BIT(31) | val);

		PCIE_DBG(dev,
			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
			dev->rc_idx,
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
	}

	mutex_lock(&com_phy_lock);
	/* init PCIe PHY */
	if (!num_rc_on)
		pcie_phy_init(dev);

	num_rc_on++;
	mutex_unlock(&com_phy_lock);

	if (options & PM_PIPE_CLK) {
		usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
					 PHY_STABILIZATION_DELAY_US_MAX);
		/* Enable the pipe clock */
		ret = msm_pcie_pipe_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto link_fail;
	}

	PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);

	/* poll the PHY ready status with bounded retries */
	do {
		if (pcie_phy_is_ready(dev))
			break;
		retries++;
		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
					 REFCLK_STABILIZATION_DELAY_US_MAX);
	} while (retries < PHY_READY_TIMEOUT_COUNT);

	PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
		dev->rc_idx, retries);

	if (pcie_phy_is_ready(dev))
		PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
	else {
		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
			dev->rc_idx);
		ret = -ENODEV;
		pcie_phy_dump(dev);
		goto link_fail;
	}

	pcie_pcs_port_phy_init(dev);

	/* optional DT-specified settling time for slow endpoints (ms) */
	if (dev->ep_latency)
		usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				dev->gpio[MSM_PCIE_GPIO_EP].on);

	/* de-assert PCIe reset link to bring EP out of reset */

	PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);

	/* set max tlp read size */
	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
				0x7000, dev->tlp_rd_size);

	/* enable link training */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));

	PCIE_DBG(dev, "%s", "check if link is up\n");

	/* Wait for up to 100ms for the link to come up */
	do {
		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
		val =  readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
	} while ((!(val & XMLH_LINK_UP) ||
		!msm_pcie_confirm_linkup(dev, false, false, NULL))
		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));

	if ((val & XMLH_LINK_UP) &&
		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
		PCIE_DBG(dev, "Link is up after %d checkings\n",
			link_check_count);
		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
	} else {
		/* link training failed: put the EP back in reset */
		PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
			dev->rc_idx);
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
			dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
			dev->rc_idx);
		ret = -1;
		goto link_fail;
	}

	msm_pcie_config_controller(dev);

	/* use the internal MSI controller unless a GIC MSI address is set */
	if (!dev->msi_gicm_addr)
		msm_pcie_config_msi_controller(dev);

	msm_pcie_config_link_state(dev);

	dev->link_status = MSM_PCIE_LINK_ENABLED;
	dev->power_on = true;
	dev->suspending = false;
	dev->link_turned_on_counter++;

	goto out;

link_fail:
	/* unwind: EP power GPIO off, per-lane PHY reset + power down */
	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	/* drop our common-PHY refcount; power it down if we were last */
	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	msm_pcie_pipe_clk_deinit(dev);
	msm_pcie_clk_deinit(dev);
clk_fail:
	msm_pcie_vreg_deinit(dev);
out:
	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
4673
/*
 * Power down the PCIe link on this root complex: assert PERST to hold
 * the endpoint in reset, reset/power down the per-lane PHY (and the
 * common PHY once the last RC is off), then release clocks, regulators
 * and pipe clock as selected by @options.  Serialized against
 * msm_pcie_enable() by dev->setup_lock.
 */
void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
{
	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (!dev->power_on) {
		PCIE_DBG(dev,
			"PCIe: the link of RC%d is already power down.\n",
			dev->rc_idx);
		mutex_unlock(&dev->setup_lock);
		return;
	}

	/* mark state first so concurrent readers see the link as gone */
	dev->link_status = MSM_PCIE_LINK_DISABLED;
	dev->power_on = false;
	dev->link_turned_off_counter++;

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);

	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);

	/* per-lane PHY into reset, then powered down */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	/* refcounted common-PHY power down (shared across RCs) */
	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	if (options & PM_CLK) {
		/* re-assert PHY ctrl reset bit before dropping clocks */
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
					BIT(0));
		msm_pcie_clk_deinit(dev);
	}

	if (options & PM_VREG)
		msm_pcie_vreg_deinit(dev);

	if (options & PM_PIPE_CLK)
		msm_pcie_pipe_clk_deinit(dev);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
4733
4734static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
4735 struct msm_pcie_device_info *ep_dev_info)
4736{
4737 u32 val;
4738 void __iomem *ep_base = ep_dev_info->conf_base;
4739 u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
4740 0xff;
4741
4742 while (current_offset) {
4743 if (msm_pcie_check_align(dev, current_offset))
4744 return;
4745
4746 val = readl_relaxed(ep_base + current_offset);
4747 if ((val & 0xff) == PCIE20_CAP_ID) {
4748 ep_dev_info->dev_ctrlstts_offset =
4749 current_offset + 0x8;
4750 break;
4751 }
4752 current_offset = (val >> 8) & 0xff;
4753 }
4754
4755 if (!ep_dev_info->dev_ctrlstts_offset) {
4756 PCIE_DBG(dev,
4757 "RC%d endpoint does not support PCIe cap registers\n",
4758 dev->rc_idx);
4759 return;
4760 }
4761
4762 PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
4763 dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
4764
4765 /* Enable AER on EP */
4766 msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
4767 BIT(3)|BIT(2)|BIT(1)|BIT(0));
4768
4769 PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
4770 readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
4771}
4772
/*
 * bus_for_each_dev() callback invoked during enumeration: registers the
 * discovered PCI device in both the global msm_pcie_dev_tbl and this
 * RC's per-device table, programs an iATU window so its config space is
 * reachable, and enables bus-mastering (bridges) or AER (endpoints).
 *
 * @dev:  generic device being iterated (wraps a pci_dev)
 * @pdev: the owning msm_pcie_dev_t, passed as opaque context
 *
 * Returns 0 on success; a table index (positive) when either table is
 * full; -ENODEV when no pci_dev is attached.
 */
static int msm_pcie_config_device_table(struct device *dev, void *pdev)
{
	struct pci_dev *pcidev = to_pci_dev(dev);
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
	int ret = 0;
	u32 rc_idx = pcie_dev->rc_idx;
	u32 i, index;
	u32 bdf = 0;
	u8 type;
	u32 h_type;
	u32 bme;

	if (!pcidev) {
		PCIE_ERR(pcie_dev,
			"PCIe: Did not find PCI device in list for RC%d.\n",
			pcie_dev->rc_idx);
		return -ENODEV;
	}

	PCIE_DBG(pcie_dev,
		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
		pcidev->vendor, pcidev->device);

	/* bus 0 is the root complex itself: nothing to map */
	if (!pcidev->bus->number)
		return ret;

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
	/* bus 1 is directly below the RC -> CFG0 (type 0) accesses;
	 * anything deeper goes through a bridge -> CFG1 (type 1)
	 */
	type = pcidev->bus->number == 1 ?
		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
		/* find the global slot pre-seeded with this BDF */
		if (msm_pcie_dev_tbl[i].bdf == bdf &&
			!msm_pcie_dev_tbl[i].dev) {
			for (index = 0; index < MAX_DEVICE_NUM; index++) {
				if (dev_table_t[index].bdf == bdf) {
					/* fill both tables identically */
					msm_pcie_dev_tbl[i].dev = pcidev;
					msm_pcie_dev_tbl[i].domain = rc_idx;
					msm_pcie_dev_tbl[i].conf_base =
						pcie_dev->conf + index * SZ_4K;
					msm_pcie_dev_tbl[i].phy_address =
						axi_conf->start + index * SZ_4K;

					dev_table_t[index].dev = pcidev;
					dev_table_t[index].domain = rc_idx;
					dev_table_t[index].conf_base =
						pcie_dev->conf + index * SZ_4K;
					dev_table_t[index].phy_address =
						axi_conf->start + index * SZ_4K;

					/* 4K config window per device */
					msm_pcie_iatu_config(pcie_dev, index,
						type,
						dev_table_t[index].phy_address,
						dev_table_t[index].phy_address
						+ SZ_4K - 1,
						bdf);

					h_type = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_HEADER_TYPE);

					bme = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_COMMAND_STATUS);

					/* header-type byte is bits 23:16;
					 * bit 16 set => type-1 (bridge)
					 * header: enable memory space +
					 * bus mastering on it
					 */
					if (h_type & (1 << 16)) {
						pci_write_config_dword(pcidev,
							PCIE20_COMMAND_STATUS,
							bme | 0x06);
					} else {
						pcie_dev->num_ep++;
						dev_table_t[index].registered =
							false;
					}

					if (pcie_dev->num_ep > 1)
						pcie_dev->pending_ep_reg = true;

					msm_pcie_config_ep_aer(pcie_dev,
						&dev_table_t[index]);

					break;
				}
			}
			if (index == MAX_DEVICE_NUM) {
				PCIE_ERR(pcie_dev,
					"RC%d PCI device table is full.\n",
					rc_idx);
				ret = index;
			} else {
				break;
			}
		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
			pcidev == msm_pcie_dev_tbl[i].dev) {
			/* already registered on a previous pass */
			break;
		}
	}
	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"Global PCI device table is full: %d elements.\n",
			i);
		PCIE_ERR(pcie_dev,
			"Bus number is 0x%x\nDevice number is 0x%x\n",
			pcidev->bus->number, pcidev->devfn);
		ret = i;
	}
	return ret;
}
4882
/*
 * Allocate an SMMU stream ID for the given endpoint device.
 *
 * Walks up to the root bus to find the owning msm_pcie_dev_t, consumes
 * the next free "short BDF" slot, programs the BDF_TRANSLATE_N register
 * so the hardware maps the device's BDF to that slot, and reports the
 * resulting SID and PCIe domain to the caller.
 *
 * @dev:    the endpoint's struct device
 * @sid:    out — allocated stream ID (smmu_sid_base | rc_idx<<4 | slot)
 * @domain: out — the RC index this device hangs off
 *
 * Returns 0 on success, MSM_PCIE_ERROR on any failure (no SMMU, no free
 * slot, BDF not found in the per-RC device table).
 * NOTE(review): current_short_bdf is updated without explicit locking —
 * presumably callers serialize; confirm before calling concurrently.
 */
int msm_pcie_configure_sid(struct device *dev, u32 *sid, int *domain)
{
	struct pci_dev *pcidev;
	struct msm_pcie_dev_t *pcie_dev;
	struct pci_bus *bus;
	int i;
	u32 bdf;

	if (!dev) {
		pr_err("%s: PCIe: endpoint device passed in is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	pcidev = to_pci_dev(dev);
	if (!pcidev) {
		pr_err("%s: PCIe: PCI device of endpoint is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	bus = pcidev->bus;
	if (!bus) {
		pr_err("%s: PCIe: Bus of PCI device is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	/* climb to the root bus; its sysdata is the owning RC */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	pcie_dev = (struct msm_pcie_dev_t *)(bus->sysdata);
	if (!pcie_dev) {
		pr_err("%s: PCIe: Could not get PCIe structure\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	if (!pcie_dev->smmu_exist) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC:%d: smmu does not exist\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev, "PCIe: RC%d: device address is: %p\n",
		pcie_dev->rc_idx, dev);
	PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device address is: %p\n",
		pcie_dev->rc_idx, pcidev);

	*domain = pcie_dev->rc_idx;

	/* claim the next short-BDF slot (undone below on failure) */
	if (pcie_dev->current_short_bdf < (MAX_SHORT_BDF_NUM - 1)) {
		pcie_dev->current_short_bdf++;
	} else {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: No more short BDF left\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (pcie_dev->pcidev_table[i].bdf == bdf) {
			*sid = pcie_dev->smmu_sid_base +
				((pcie_dev->rc_idx << 4) |
				pcie_dev->current_short_bdf);

			/* tell HW to translate this BDF to the short slot */
			msm_pcie_write_reg(pcie_dev->parf,
				PCIE20_PARF_BDF_TRANSLATE_N +
				pcie_dev->current_short_bdf * 4,
				bdf >> 16);

			pcie_dev->pcidev_table[i].sid = *sid;
			pcie_dev->pcidev_table[i].short_bdf =
				pcie_dev->current_short_bdf;
			break;
		}
	}

	if (i == MAX_DEVICE_NUM) {
		/* roll back the slot claimed above */
		pcie_dev->current_short_bdf--;
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d could not find BDF:%d\n",
			pcie_dev->rc_idx, bdf);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev,
		"PCIe: RC%d: Device: %02x:%02x.%01x received SID %d\n",
		pcie_dev->rc_idx,
		bdf >> 24,
		bdf >> 19 & 0x1f,
		bdf >> 16 & 0x07,
		*sid);

	return 0;
}
EXPORT_SYMBOL(msm_pcie_configure_sid);
4983
4984int msm_pcie_enumerate(u32 rc_idx)
4985{
4986 int ret = 0, bus_ret = 0, scan_ret = 0;
4987 struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
4988
4989 mutex_lock(&dev->enumerate_lock);
4990
4991 PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
4992
4993 if (!dev->drv_ready) {
4994 PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
4995 rc_idx);
4996 ret = -EPROBE_DEFER;
4997 goto out;
4998 }
4999
5000 if (!dev->enumerated) {
5001 ret = msm_pcie_enable(dev, PM_ALL);
5002
5003 /* kick start ARM PCI configuration framework */
5004 if (!ret) {
5005 struct pci_dev *pcidev = NULL;
5006 bool found = false;
5007 struct pci_bus *bus;
5008 resource_size_t iobase = 0;
5009 u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
5010 u32 vendor_id = ids & 0xffff;
5011 u32 device_id = (ids & 0xffff0000) >> 16;
5012 LIST_HEAD(res);
5013
5014 PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
5015 vendor_id, device_id);
5016
5017 ret = of_pci_get_host_bridge_resources(
5018 dev->pdev->dev.of_node,
5019 0, 0xff, &res, &iobase);
5020 if (ret) {
5021 PCIE_ERR(dev,
5022 "PCIe: failed to get host bridge resources for RC%d: %d\n",
5023 dev->rc_idx, ret);
5024 goto out;
5025 }
5026
5027 bus = pci_create_root_bus(&dev->pdev->dev, 0,
5028 &msm_pcie_ops,
5029 msm_pcie_setup_sys_data(dev),
5030 &res);
5031 if (!bus) {
5032 PCIE_ERR(dev,
5033 "PCIe: failed to create root bus for RC%d\n",
5034 dev->rc_idx);
5035 ret = -ENOMEM;
5036 goto out;
5037 }
5038
5039 scan_ret = pci_scan_child_bus(bus);
5040 PCIE_DBG(dev,
5041 "PCIe: RC%d: The max subordinate bus number discovered is %d\n",
5042 dev->rc_idx, ret);
5043
5044 msm_pcie_fixup_irqs(dev);
5045 pci_assign_unassigned_bus_resources(bus);
5046 pci_bus_add_devices(bus);
5047
5048 dev->enumerated = true;
5049
5050 msm_pcie_write_mask(dev->dm_core +
5051 PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
5052
5053 if (dev->cpl_timeout && dev->bridge_found)
5054 msm_pcie_write_reg_field(dev->dm_core,
5055 PCIE20_DEVICE_CONTROL2_STATUS2,
5056 0xf, dev->cpl_timeout);
5057
5058 if (dev->shadow_en) {
5059 u32 val = readl_relaxed(dev->dm_core +
5060 PCIE20_COMMAND_STATUS);
5061 PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
5062 val);
5063 dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
5064 }
5065
5066 do {
5067 pcidev = pci_get_device(vendor_id,
5068 device_id, pcidev);
5069 if (pcidev && (&msm_pcie_dev[rc_idx] ==
5070 (struct msm_pcie_dev_t *)
5071 PCIE_BUS_PRIV_DATA(pcidev->bus))) {
5072 msm_pcie_dev[rc_idx].dev = pcidev;
5073 found = true;
5074 PCIE_DBG(&msm_pcie_dev[rc_idx],
5075 "PCI device is found for RC%d\n",
5076 rc_idx);
5077 }
5078 } while (!found && pcidev);
5079
5080 if (!pcidev) {
5081 PCIE_ERR(dev,
5082 "PCIe: Did not find PCI device for RC%d.\n",
5083 dev->rc_idx);
5084 ret = -ENODEV;
5085 goto out;
5086 }
5087
5088 bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
5089 &msm_pcie_config_device_table);
5090
5091 if (bus_ret) {
5092 PCIE_ERR(dev,
5093 "PCIe: Failed to set up device table for RC%d\n",
5094 dev->rc_idx);
5095 ret = -ENODEV;
5096 goto out;
5097 }
5098 } else {
5099 PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
5100 dev->rc_idx);
5101 }
5102 } else {
5103 PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
5104 dev->rc_idx);
5105 }
5106
5107out:
5108 mutex_unlock(&dev->enumerate_lock);
5109
5110 return ret;
5111}
5112EXPORT_SYMBOL(msm_pcie_enumerate);
5113
5114static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
5115 enum msm_pcie_event event)
5116{
5117 if (dev->event_reg && dev->event_reg->callback &&
5118 (dev->event_reg->events & event)) {
5119 struct msm_pcie_notify *notify = &dev->event_reg->notify;
5120
5121 notify->event = event;
5122 notify->user = dev->event_reg->user;
5123 PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
5124 dev->rc_idx, event);
5125 dev->event_reg->callback(notify);
5126
5127 if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
5128 (event == MSM_PCIE_EVENT_LINKDOWN)) {
5129 dev->user_suspend = true;
5130 PCIE_DBG(dev,
5131 "PCIe: Client of RC%d will recover the link later.\n",
5132 dev->rc_idx);
5133 return;
5134 }
5135 } else {
5136 PCIE_DBG2(dev,
5137 "PCIe: Client of RC%d does not have registration for event %d\n",
5138 dev->rc_idx, event);
5139 }
5140}
5141
/*
 * Workqueue handler for the WAKE# interrupt.  If the RC has not been
 * enumerated yet, enumerate it now and deliver MSM_PCIE_EVENT_LINKUP to
 * every registered client (per-endpoint registrations when num_ep > 1,
 * otherwise the single RC-wide registration).  A wake on an already
 * enumerated RC is logged and ignored.  Serialized by recovery_lock.
 */
static void handle_wake_func(struct work_struct *work)
{
	int i, ret;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
					handle_wake_work);

	PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);

	mutex_lock(&dev->recovery_lock);

	if (!dev->enumerated) {
		PCIE_DBG(dev,
			"PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
			dev->rc_idx);

		ret = msm_pcie_enumerate(dev->rc_idx);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: failed to enable RC%d upon wake request from the device.\n",
				dev->rc_idx);
			goto out;
		}

		if (dev->num_ep > 1) {
			/* multi-EP: fan the link-up event out per endpoint */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg = dev->pcidev_table[i].event_reg;

				if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
					&& dev->event_reg &&
					dev->event_reg->callback &&
					(dev->event_reg->events &
					MSM_PCIE_EVENT_LINKUP)) {
					struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
					notify->event = MSM_PCIE_EVENT_LINKUP;
					notify->user = dev->event_reg->user;
					PCIE_DBG(dev,
						"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
						dev->rc_idx);
					dev->event_reg->callback(notify);
				}
			}
		} else {
			/* single EP: notify the RC-wide registration */
			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
				dev->event_reg && dev->event_reg->callback &&
				(dev->event_reg->events &
				MSM_PCIE_EVENT_LINKUP)) {
				struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
				notify->event = MSM_PCIE_EVENT_LINKUP;
				notify->user = dev->event_reg->user;
				PCIE_DBG(dev,
					"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
					dev->rc_idx);
				dev->event_reg->callback(notify);
			} else {
				PCIE_DBG(dev,
					"PCIe: Client of RC%d does not have registration for linkup event.\n",
					dev->rc_idx);
			}
		}
		goto out;
	} else {
		PCIE_ERR(dev,
			"PCIe: The enumeration for RC%d has already been done.\n",
			dev->rc_idx);
		goto out;
	}

out:
	mutex_unlock(&dev->recovery_lock);
}
5214
/*
 * IRQ handler for PCIe AER (Advanced Error Reporting) events on one RC.
 *
 * Snapshots and clears the RC's AER status registers and Device Status
 * error bits, bumps the per-RC error counters, then resolves the
 * reporting endpoint(s) via the AER error-source register and repeats
 * the accounting/clearing for them.  Runs under dev->aer_lock with
 * interrupts disabled.
 */
static irqreturn_t handle_aer_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
	int ep_corr_val = 0, ep_uncorr_val = 0;
	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
	u32 ep_dev_ctrlstts_offset = 0;
	int i, j, ep_src_bdf = 0;
	void __iomem *ep_base = NULL;
	unsigned long irqsave_flags;

	PCIE_DBG2(dev,
		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
		dev->rc_idx, irq, dev->rc_corr_counter,
		dev->rc_non_fatal_counter, dev->rc_fatal_counter,
		dev->ep_corr_counter, dev->ep_non_fatal_counter,
		dev->ep_fatal_counter);

	spin_lock_irqsave(&dev->aer_lock, irqsave_flags);

	/* ignore errors reported while the RC is being suspended */
	if (dev->suspending) {
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently suspending.\n",
			dev->rc_idx);
		spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
		return IRQ_HANDLED;
	}

	/* snapshot the RC's AER status and Device Status registers */
	uncorr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_UNCORR_ERR_STATUS_REG);
	corr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_CORR_ERR_STATUS_REG);
	rc_err_status = readl_relaxed(dev->dm_core +
				PCIE20_AER_ROOT_ERR_STATUS_REG);
	rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
				PCIE20_CAP_DEVCTRLSTATUS);

	if (uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				uncorr_val);
	/* rate-limit correctable-error logging via corr_counter_limit */
	if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
		PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				corr_val);

	/* Device Status bits: 16 = correctable, 17 = non-fatal, 18 = fatal */
	if ((rc_dev_ctrlstts >> 18) & 0x1)
		dev->rc_fatal_counter++;
	if ((rc_dev_ctrlstts >> 17) & 0x1)
		dev->rc_non_fatal_counter++;
	if ((rc_dev_ctrlstts >> 16) & 0x1)
		dev->rc_corr_counter++;

	/* clear (write-1) the RC's error-detected bits */
	msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(18)|BIT(17)|BIT(16));

	if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
		PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
		goto out;
	}

	/*
	 * Pass 0 reads the correctable error source (low 16 bits of
	 * ERR_SRC_ID shifted into BDF position); pass 1 the
	 * uncorrectable source (high 16 bits, already in position).
	 */
	for (i = 0; i < 2; i++) {
		if (i)
			ep_src_bdf = readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
		else
			ep_src_bdf = (readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;

		if (!ep_src_bdf)
			continue;

		/* find the endpoint's config base by matching its BDF */
		for (j = 0; j < MAX_DEVICE_NUM; j++) {
			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
				PCIE_DBG2(dev,
					"PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
					i ? "Uncorrectable" : "Correctable",
					dev->pcidev_table[j].bdf >> 24,
					dev->pcidev_table[j].bdf >> 19 & 0x1f,
					dev->pcidev_table[j].bdf >> 16 & 0x07);
				ep_base = dev->pcidev_table[j].conf_base;
				ep_dev_ctrlstts_offset = dev->
					pcidev_table[j].dev_ctrlstts_offset;
				break;
			}
		}

		if (!ep_base) {
			PCIE_ERR(dev,
				"PCIe: RC%d no endpoint found for reported error\n",
				dev->rc_idx);
			goto out;
		}

		/* same accounting for the endpoint's registers */
		ep_uncorr_val = readl_relaxed(ep_base +
					PCIE20_AER_UNCORR_ERR_STATUS_REG);
		ep_corr_val = readl_relaxed(ep_base +
					PCIE20_AER_CORR_ERR_STATUS_REG);
		ep_dev_ctrlstts = readl_relaxed(ep_base +
					ep_dev_ctrlstts_offset);

		if (ep_uncorr_val)
			PCIE_DBG(dev,
				"EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				ep_uncorr_val);
		if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
			PCIE_DBG(dev,
				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				ep_corr_val);

		if ((ep_dev_ctrlstts >> 18) & 0x1)
			dev->ep_fatal_counter++;
		if ((ep_dev_ctrlstts >> 17) & 0x1)
			dev->ep_non_fatal_counter++;
		if ((ep_dev_ctrlstts >> 16) & 0x1)
			dev->ep_corr_counter++;

		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
					BIT(18)|BIT(17)|BIT(16));

		/* clear the EP's AER status bits (RW1C masks) */
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_UNCORR_ERR_STATUS_REG,
				0x3fff031, 0x3fff031);
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_CORR_ERR_STATUS_REG,
				0xf1c1, 0xf1c1);
	}
out:
	if (((dev->rc_corr_counter < corr_counter_limit) &&
		(dev->ep_corr_counter < corr_counter_limit)) ||
		uncorr_val || ep_uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
				rc_err_status);
	/* finally clear the RC's AER status registers */
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_UNCORR_ERR_STATUS_REG,
			0x3fff031, 0x3fff031);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_CORR_ERR_STATUS_REG,
			0xf1c1, 0xf1c1);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_ROOT_ERR_STATUS_REG,
			0x7f, 0x7f);

	spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
	return IRQ_HANDLED;
}
5360
5361static irqreturn_t handle_wake_irq(int irq, void *data)
5362{
5363 struct msm_pcie_dev_t *dev = data;
5364 unsigned long irqsave_flags;
5365 int i;
5366
5367 spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);
5368
5369 dev->wake_counter++;
5370 PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
5371 dev->wake_counter, dev->rc_idx);
5372
5373 PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
5374 dev->rc_idx);
5375
5376 if (!dev->enumerated) {
5377 PCIE_DBG(dev, "Start enumeating RC%d\n", dev->rc_idx);
5378 if (dev->ep_wakeirq)
5379 schedule_work(&dev->handle_wake_work);
5380 else
5381 PCIE_DBG(dev,
5382 "wake irq is received but ep_wakeirq is not supported for RC%d.\n",
5383 dev->rc_idx);
5384 } else {
5385 PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
5386 __pm_stay_awake(&dev->ws);
5387 __pm_relax(&dev->ws);
5388
5389 if (dev->num_ep > 1) {
5390 for (i = 0; i < MAX_DEVICE_NUM; i++) {
5391 dev->event_reg =
5392 dev->pcidev_table[i].event_reg;
5393 msm_pcie_notify_client(dev,
5394 MSM_PCIE_EVENT_WAKEUP);
5395 }
5396 } else {
5397 msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
5398 }
5399 }
5400
5401 spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);
5402
5403 return IRQ_HANDLED;
5404}
5405
/*
 * IRQ handler for the PCIe link-down interrupt.
 *
 * Marks the link as disabled, asserts PERST towards the endpoint, and
 * notifies the client(s) with MSM_PCIE_EVENT_LINKDOWN so they can
 * trigger recovery.  Counted in dev->linkdown_counter for debug.
 * Runs under dev->linkdown_lock with interrupts disabled.
 */
static irqreturn_t handle_linkdown_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);

	dev->linkdown_counter++;

	PCIE_DBG(dev,
		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
		dev->linkdown_counter, dev->rc_idx);

	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
		/* spurious: link never came up or is already down */
		PCIE_DBG(dev,
			"PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
			dev->rc_idx);
	} else if (dev->suspending) {
		/* expected while the link is being taken down */
		PCIE_DBG(dev,
			"PCIe:the link of RC%d is suspending.\n",
			dev->rc_idx);
	} else {
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		/* stop using the config-register shadow once the link drops */
		dev->shadow_en = false;

		/* debug aid: optionally crash immediately on linkdown */
		if (dev->linkdown_panic)
			panic("User has chosen to panic on linkdown\n");

		/* assert PERST */
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

		if (dev->num_ep > 1) {
			/* notify each endpoint's registered client */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_LINKDOWN);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
		}
	}

	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);

	return IRQ_HANDLED;
}
5456
/*
 * IRQ handler for the physical MSI interrupt line.
 *
 * Scans each MSI controller status register, acks every pending vector
 * by writing its bit back (write-1-to-clear), and dispatches the
 * corresponding virtual IRQ from dev->irq_domain.
 */
static irqreturn_t handle_msi_irq(int irq, void *data)
{
	int i, j;
	unsigned long val;
	struct msm_pcie_dev_t *dev = data;
	void __iomem *ctrl_status;

	PCIE_DUMP(dev, "irq: %d\n", irq);

	/*
	 * check for set bits, clear it by setting that bit
	 * and trigger corresponding irq
	 */
	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
		/* status registers for each 32-vector group are 12 bytes apart */
		ctrl_status = dev->dm_core +
				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);

		val = readl_relaxed(ctrl_status);
		while (val) {
			j = find_first_bit(&val, 32);
			writel_relaxed(BIT(j), ctrl_status);
			/* ensure that interrupt is cleared (acked) */
			wmb();
			/* vector number is the bit index within group i */
			generic_handle_irq(
				irq_find_mapping(dev->irq_domain, (j + (32*i)))
				);
			/* re-read: new vectors may have arrived meanwhile */
			val = readl_relaxed(ctrl_status);
		}
	}

	return IRQ_HANDLED;
}
5489
/*
 * Handler for the consolidated "global" PCIe interrupt.
 *
 * Reads the masked event bits from the PARF status register, acks them
 * via the clear register, and dispatches each set bit to the matching
 * sub-handler (linkdown or AER).  Runs under dev->global_irq_lock.
 */
static irqreturn_t handle_global_irq(int irq, void *data)
{
	int i;
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	u32 status = 0;

	spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);

	/* only consider events that are not masked off */
	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);

	/* ack everything we are about to handle */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);

	PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
		dev->rc_idx, irq, status);

	for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
		if (status & BIT(i)) {
			switch (i) {
			case MSM_PCIE_INT_EVT_LINK_DOWN:
				PCIE_DBG(dev,
					"PCIe: RC%d: handle linkdown event.\n",
					dev->rc_idx);
				handle_linkdown_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_LEGACY:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER legacy event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_ERR:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			default:
				PCIE_ERR(dev,
					"PCIe: RC%d: Unexpected event %d is caught!\n",
					dev->rc_idx, i);
			}
		}
	}

	spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);

	return IRQ_HANDLED;
}
5540
5541void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev)
5542{
5543 int pos, i;
5544 struct msm_pcie_dev_t *dev;
5545
5546 if (pcie_dev)
5547 dev = pcie_dev;
5548 else
5549 dev = irq_get_chip_data(irq);
5550
5551 if (!dev) {
5552 pr_err("PCIe: device is null. IRQ:%d\n", irq);
5553 return;
5554 }
5555
5556 if (dev->msi_gicm_addr) {
5557 PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
5558
5559 for (i = 0; i < MSM_PCIE_MAX_MSI; i++)
5560 if (irq == dev->msi[i].num)
5561 break;
5562 if (i == MSM_PCIE_MAX_MSI) {
5563 PCIE_ERR(dev,
5564 "Could not find irq: %d in RC%d MSI table\n",
5565 irq, dev->rc_idx);
5566 return;
5567 }
5568
5569 pos = i;
5570 } else {
5571 PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
5572 pos = irq - irq_find_mapping(dev->irq_domain, 0);
5573 }
5574
5575 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5576
5577 PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
5578 pos, *dev->msi_irq_in_use);
5579 clear_bit(pos, dev->msi_irq_in_use);
5580 PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
5581 pos, *dev->msi_irq_in_use);
5582}
5583
5584/* hookup to linux pci msi framework */
/* Free a single MSI vector on behalf of the PCI MSI core. */
void arch_teardown_msi_irq(unsigned int irq)
{
	PCIE_GEN_DBG("irq %d deallocated\n", irq);
	/* NULL device: msm_pcie_destroy_irq resolves it from chip data */
	msm_pcie_destroy_irq(irq, NULL);
}
5590
5591void arch_teardown_msi_irqs(struct pci_dev *dev)
5592{
5593 struct msi_desc *entry;
5594 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5595
5596 PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
5597 pcie_dev->rc_idx, dev->vendor, dev->device);
5598
5599 pcie_dev->use_msi = false;
5600
5601 list_for_each_entry(entry, &dev->dev.msi_list, list) {
5602 int i, nvec;
5603
5604 if (entry->irq == 0)
5605 continue;
5606 nvec = 1 << entry->msi_attrib.multiple;
5607 for (i = 0; i < nvec; i++)
5608 msm_pcie_destroy_irq(entry->irq + i, pcie_dev);
5609 }
5610}
5611
/* .irq_ack hook: nothing to do — vectors are acked in handle_msi_irq(). */
static void msm_pcie_msi_nop(struct irq_data *d)
{
}
5615
/* irq_chip for MSI vectors mapped through the software irq_domain. */
static struct irq_chip pcie_msi_chip = {
	.name = "msm-pcie-msi",
	.irq_ack = msm_pcie_msi_nop,
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
5624
5625static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
5626{
5627 int irq, pos;
5628
5629 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5630
5631again:
5632 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5633
5634 if (pos >= PCIE_MSI_NR_IRQS)
5635 return -ENOSPC;
5636
5637 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5638
5639 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5640 goto again;
5641 else
5642 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5643
5644 irq = irq_create_mapping(dev->irq_domain, pos);
5645 if (!irq)
5646 return -EINVAL;
5647
5648 return irq;
5649}
5650
/*
 * Set up one MSI vector backed by the software irq_domain.
 *
 * Allocates a vector, binds it to @desc, and programs the MSI message:
 * the endpoint writes to MSM_PCIE_MSI_PHY with data equal to the
 * vector's offset within the domain.  Returns 0 or a negative errno.
 */
static int arch_setup_msi_irq_default(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	irq = msm_pcie_create_irq(dev);

	PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);

	if (irq < 0)
		return irq;

	PCIE_DBG(dev, "irq %d allocated\n", irq);

	irq_set_msi_desc(irq, desc);

	/* write msi vector and data */
	msg.address_hi = 0;
	msg.address_lo = MSM_PCIE_MSI_PHY;
	/* data = offset of this vector from the domain's first mapping */
	msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
	write_msi_msg(irq, &msg);

	return 0;
}
5679
5680static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
5681{
5682 int irq, pos;
5683
5684 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5685
5686again:
5687 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5688
5689 if (pos >= PCIE_MSI_NR_IRQS)
5690 return -ENOSPC;
5691
5692 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5693
5694 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5695 goto again;
5696 else
5697 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5698
5699 if (pos >= MSM_PCIE_MAX_MSI) {
5700 PCIE_ERR(dev,
5701 "PCIe: RC%d: pos %d is not less than %d\n",
5702 dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
5703 return MSM_PCIE_ERROR;
5704 }
5705
5706 irq = dev->msi[pos].num;
5707 if (!irq) {
5708 PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
5709 dev->rc_idx);
5710 return -EINVAL;
5711 }
5712
5713 return irq;
5714}
5715
/*
 * Set up @nvec QGIC-routed MSI vectors for one MSI descriptor.
 *
 * Only the first IRQ carries the msi_desc; the programmed MSI data is
 * the GIC base plus the first IRQ's offset within the msi[] table.
 * Returns 0 or a negative errno.
 *
 * NOTE(review): if msm_pcie_create_irq_qgic() fails partway through the
 * loop, vectors reserved by earlier iterations are not released here —
 * TODO confirm whether callers recover via arch_teardown_msi_irqs().
 */
static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq, index, firstirq = 0;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	for (index = 0; index < nvec; index++) {
		irq = msm_pcie_create_irq_qgic(dev);
		PCIE_DBG(dev, "irq %d is allocated\n", irq);

		if (irq < 0)
			return irq;

		if (index == 0)
			firstirq = irq;

		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
	}

	/* write msi vector and data */
	irq_set_msi_desc(firstirq, desc);
	msg.address_hi = 0;
	msg.address_lo = dev->msi_gicm_addr;
	msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
	write_msi_msg(firstirq, &msg);

	return 0;
}
5747
5748int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
5749{
5750 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5751
5752 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5753
5754 if (dev->msi_gicm_addr)
5755 return arch_setup_msi_irq_qgic(pdev, desc, 1);
5756 else
5757 return arch_setup_msi_irq_default(pdev, desc, 1);
5758}
5759
/*
 * Return floor(log2(nvec)): the "multiple" field encoding the number
 * of MSI vectors (nvec is expected to be a power of two).
 */
static int msm_pcie_get_msi_multiple(int nvec)
{
	int bits = 0;

	/* count how many times nvec can be halved before reaching zero */
	for (; nvec; nvec >>= 1)
		bits++;

	PCIE_GEN_DBG("log2 number of MSI multiple:%d\n",
		bits - 1);

	return bits - 1;
}
5773
5774int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
5775{
5776 struct msi_desc *entry;
5777 int ret;
5778 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5779
5780 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
5781
5782 if (type != PCI_CAP_ID_MSI || nvec > 32)
5783 return -ENOSPC;
5784
5785 PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);
5786
5787 list_for_each_entry(entry, &dev->dev.msi_list, list) {
5788 entry->msi_attrib.multiple =
5789 msm_pcie_get_msi_multiple(nvec);
5790
5791 if (pcie_dev->msi_gicm_addr)
5792 ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
5793 else
5794 ret = arch_setup_msi_irq_default(dev, entry, nvec);
5795
5796 PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);
5797
5798 if (ret < 0)
5799 return ret;
5800 if (ret > 0)
5801 return -ENOSPC;
5802 }
5803
5804 pcie_dev->use_msi = true;
5805
5806 return 0;
5807}
5808
5809static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
5810 irq_hw_number_t hwirq)
5811{
5812 irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
5813 irq_set_chip_data(irq, domain->host_data);
5814 return 0;
5815}
5816
/* irq_domain ops for the software MSI domain; see msm_pcie_msi_map(). */
static const struct irq_domain_ops msm_pcie_msi_ops = {
	.map = msm_pcie_msi_map,
};
5820
5821int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
5822{
5823 int rc;
5824 int msi_start = 0;
5825 struct device *pdev = &dev->pdev->dev;
5826
5827 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5828
5829 if (dev->rc_idx)
5830 wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
5831 else
5832 wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
5833
5834 /* register handler for linkdown interrupt */
5835 if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
5836 rc = devm_request_irq(pdev,
5837 dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
5838 handle_linkdown_irq,
5839 IRQF_TRIGGER_RISING,
5840 dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
5841 dev);
5842 if (rc) {
5843 PCIE_ERR(dev,
5844 "PCIe: Unable to request linkdown interrupt:%d\n",
5845 dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
5846 return rc;
5847 }
5848 }
5849
5850 /* register handler for physical MSI interrupt line */
5851 if (dev->irq[MSM_PCIE_INT_MSI].num) {
5852 rc = devm_request_irq(pdev,
5853 dev->irq[MSM_PCIE_INT_MSI].num,
5854 handle_msi_irq,
5855 IRQF_TRIGGER_RISING,
5856 dev->irq[MSM_PCIE_INT_MSI].name,
5857 dev);
5858 if (rc) {
5859 PCIE_ERR(dev,
5860 "PCIe: RC%d: Unable to request MSI interrupt\n",
5861 dev->rc_idx);
5862 return rc;
5863 }
5864 }
5865
5866 /* register handler for AER interrupt */
5867 if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
5868 rc = devm_request_irq(pdev,
5869 dev->irq[MSM_PCIE_INT_PLS_ERR].num,
5870 handle_aer_irq,
5871 IRQF_TRIGGER_RISING,
5872 dev->irq[MSM_PCIE_INT_PLS_ERR].name,
5873 dev);
5874 if (rc) {
5875 PCIE_ERR(dev,
5876 "PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
5877 dev->rc_idx,
5878 dev->irq[MSM_PCIE_INT_PLS_ERR].num);
5879 return rc;
5880 }
5881 }
5882
5883 /* register handler for AER legacy interrupt */
5884 if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
5885 rc = devm_request_irq(pdev,
5886 dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
5887 handle_aer_irq,
5888 IRQF_TRIGGER_RISING,
5889 dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
5890 dev);
5891 if (rc) {
5892 PCIE_ERR(dev,
5893 "PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
5894 dev->rc_idx,
5895 dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
5896 return rc;
5897 }
5898 }
5899
5900 if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
5901 rc = devm_request_irq(pdev,
5902 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
5903 handle_global_irq,
5904 IRQF_TRIGGER_RISING,
5905 dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
5906 dev);
5907 if (rc) {
5908 PCIE_ERR(dev,
5909 "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
5910 dev->rc_idx,
5911 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
5912 return rc;
5913 }
5914 }
5915
5916 /* register handler for PCIE_WAKE_N interrupt line */
5917 if (dev->wake_n) {
5918 rc = devm_request_irq(pdev,
5919 dev->wake_n, handle_wake_irq,
5920 IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
5921 if (rc) {
5922 PCIE_ERR(dev,
5923 "PCIe: RC%d: Unable to request wake interrupt\n",
5924 dev->rc_idx);
5925 return rc;
5926 }
5927
5928 INIT_WORK(&dev->handle_wake_work, handle_wake_func);
5929
5930 rc = enable_irq_wake(dev->wake_n);
5931 if (rc) {
5932 PCIE_ERR(dev,
5933 "PCIe: RC%d: Unable to enable wake interrupt\n",
5934 dev->rc_idx);
5935 return rc;
5936 }
5937 }
5938
5939 /* Create a virtual domain of interrupts */
5940 if (!dev->msi_gicm_addr) {
5941 dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
5942 PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
5943
5944 if (!dev->irq_domain) {
5945 PCIE_ERR(dev,
5946 "PCIe: RC%d: Unable to initialize irq domain\n",
5947 dev->rc_idx);
5948
5949 if (dev->wake_n)
5950 disable_irq(dev->wake_n);
5951
5952 return PTR_ERR(dev->irq_domain);
5953 }
5954
5955 msi_start = irq_create_mapping(dev->irq_domain, 0);
5956 }
5957
5958 return 0;
5959}
5960
/*
 * Undo msm_pcie_irq_init: drop the wakeup source and disable the
 * WAKE# interrupt if one was requested.  The devm-requested IRQ
 * handlers are released by the driver core.
 */
void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	wakeup_source_trash(&dev->ws);

	if (dev->wake_n)
		disable_irq(dev->wake_n);
}
5970
5971
5972static int msm_pcie_probe(struct platform_device *pdev)
5973{
5974 int ret = 0;
5975 int rc_idx = -1;
5976 int i, j;
5977
5978 PCIE_GEN_DBG("%s\n", __func__);
5979
5980 mutex_lock(&pcie_drv.drv_lock);
5981
5982 ret = of_property_read_u32((&pdev->dev)->of_node,
5983 "cell-index", &rc_idx);
5984 if (ret) {
5985 PCIE_GEN_DBG("Did not find RC index.\n");
5986 goto out;
5987 } else {
5988 if (rc_idx >= MAX_RC_NUM) {
5989 pr_err(
5990 "PCIe: Invalid RC Index %d (max supported = %d)\n",
5991 rc_idx, MAX_RC_NUM);
5992 goto out;
5993 }
5994 pcie_drv.rc_num++;
5995 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
5996 rc_idx);
5997 }
5998
5999 msm_pcie_dev[rc_idx].l0s_supported =
6000 of_property_read_bool((&pdev->dev)->of_node,
6001 "qcom,l0s-supported");
6002 PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
6003 msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
6004 msm_pcie_dev[rc_idx].l1_supported =
6005 of_property_read_bool((&pdev->dev)->of_node,
6006 "qcom,l1-supported");
6007 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
6008 msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
6009 msm_pcie_dev[rc_idx].l1ss_supported =
6010 of_property_read_bool((&pdev->dev)->of_node,
6011 "qcom,l1ss-supported");
6012 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
6013 msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
6014 msm_pcie_dev[rc_idx].common_clk_en =
6015 of_property_read_bool((&pdev->dev)->of_node,
6016 "qcom,common-clk-en");
6017 PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
6018 msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
6019 msm_pcie_dev[rc_idx].clk_power_manage_en =
6020 of_property_read_bool((&pdev->dev)->of_node,
6021 "qcom,clk-power-manage-en");
6022 PCIE_DBG(&msm_pcie_dev[rc_idx],
6023 "Clock power management is %s enabled.\n",
6024 msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
6025 msm_pcie_dev[rc_idx].aux_clk_sync =
6026 of_property_read_bool((&pdev->dev)->of_node,
6027 "qcom,aux-clk-sync");
6028 PCIE_DBG(&msm_pcie_dev[rc_idx],
6029 "AUX clock is %s synchronous to Core clock.\n",
6030 msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
6031
6032 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
6033 of_property_read_bool((&pdev->dev)->of_node,
6034 "qcom,use-19p2mhz-aux-clk");
6035 PCIE_DBG(&msm_pcie_dev[rc_idx],
6036 "AUX clock frequency is %s 19.2MHz.\n",
6037 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
6038
6039 msm_pcie_dev[rc_idx].smmu_exist =
6040 of_property_read_bool((&pdev->dev)->of_node,
6041 "qcom,smmu-exist");
6042 PCIE_DBG(&msm_pcie_dev[rc_idx],
6043 "SMMU does %s exist.\n",
6044 msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
6045
6046 msm_pcie_dev[rc_idx].smmu_sid_base = 0;
6047 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
6048 &msm_pcie_dev[rc_idx].smmu_sid_base);
6049 if (ret)
6050 PCIE_DBG(&msm_pcie_dev[rc_idx],
6051 "RC%d SMMU sid base not found\n",
6052 msm_pcie_dev[rc_idx].rc_idx);
6053 else
6054 PCIE_DBG(&msm_pcie_dev[rc_idx],
6055 "RC%d: qcom,smmu-sid-base: 0x%x.\n",
6056 msm_pcie_dev[rc_idx].rc_idx,
6057 msm_pcie_dev[rc_idx].smmu_sid_base);
6058
6059 msm_pcie_dev[rc_idx].ep_wakeirq =
6060 of_property_read_bool((&pdev->dev)->of_node,
6061 "qcom,ep-wakeirq");
6062 PCIE_DBG(&msm_pcie_dev[rc_idx],
6063 "PCIe: EP of RC%d does %s assert wake when it is up.\n",
6064 rc_idx, msm_pcie_dev[rc_idx].ep_wakeirq ? "" : "not");
6065
6066 msm_pcie_dev[rc_idx].phy_ver = 1;
6067 ret = of_property_read_u32((&pdev->dev)->of_node,
6068 "qcom,pcie-phy-ver",
6069 &msm_pcie_dev[rc_idx].phy_ver);
6070 if (ret)
6071 PCIE_DBG(&msm_pcie_dev[rc_idx],
6072 "RC%d: pcie-phy-ver does not exist.\n",
6073 msm_pcie_dev[rc_idx].rc_idx);
6074 else
6075 PCIE_DBG(&msm_pcie_dev[rc_idx],
6076 "RC%d: pcie-phy-ver: %d.\n",
6077 msm_pcie_dev[rc_idx].rc_idx,
6078 msm_pcie_dev[rc_idx].phy_ver);
6079
6080 msm_pcie_dev[rc_idx].n_fts = 0;
6081 ret = of_property_read_u32((&pdev->dev)->of_node,
6082 "qcom,n-fts",
6083 &msm_pcie_dev[rc_idx].n_fts);
6084
6085 if (ret)
6086 PCIE_DBG(&msm_pcie_dev[rc_idx],
6087 "n-fts does not exist. ret=%d\n", ret);
6088 else
6089 PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
6090 msm_pcie_dev[rc_idx].n_fts);
6091
6092 msm_pcie_dev[rc_idx].common_phy =
6093 of_property_read_bool((&pdev->dev)->of_node,
6094 "qcom,common-phy");
6095 PCIE_DBG(&msm_pcie_dev[rc_idx],
6096 "PCIe: RC%d: Common PHY does %s exist.\n",
6097 rc_idx, msm_pcie_dev[rc_idx].common_phy ? "" : "not");
6098
6099 msm_pcie_dev[rc_idx].ext_ref_clk =
6100 of_property_read_bool((&pdev->dev)->of_node,
6101 "qcom,ext-ref-clk");
6102 PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
6103 msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
6104
6105 msm_pcie_dev[rc_idx].ep_latency = 0;
6106 ret = of_property_read_u32((&pdev->dev)->of_node,
6107 "qcom,ep-latency",
6108 &msm_pcie_dev[rc_idx].ep_latency);
6109 if (ret)
6110 PCIE_DBG(&msm_pcie_dev[rc_idx],
6111 "RC%d: ep-latency does not exist.\n",
6112 rc_idx);
6113 else
6114 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
6115 rc_idx, msm_pcie_dev[rc_idx].ep_latency);
6116
6117 msm_pcie_dev[rc_idx].wr_halt_size = 0;
6118 ret = of_property_read_u32(pdev->dev.of_node,
6119 "qcom,wr-halt-size",
6120 &msm_pcie_dev[rc_idx].wr_halt_size);
6121 if (ret)
6122 PCIE_DBG(&msm_pcie_dev[rc_idx],
6123 "RC%d: wr-halt-size not specified in dt. Use default value.\n",
6124 rc_idx);
6125 else
6126 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
6127 rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
6128
6129 msm_pcie_dev[rc_idx].cpl_timeout = 0;
6130 ret = of_property_read_u32((&pdev->dev)->of_node,
6131 "qcom,cpl-timeout",
6132 &msm_pcie_dev[rc_idx].cpl_timeout);
6133 if (ret)
6134 PCIE_DBG(&msm_pcie_dev[rc_idx],
6135 "RC%d: Using default cpl-timeout.\n",
6136 rc_idx);
6137 else
6138 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
6139 rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
6140
6141 msm_pcie_dev[rc_idx].perst_delay_us_min =
6142 PERST_PROPAGATION_DELAY_US_MIN;
6143 ret = of_property_read_u32(pdev->dev.of_node,
6144 "qcom,perst-delay-us-min",
6145 &msm_pcie_dev[rc_idx].perst_delay_us_min);
6146 if (ret)
6147 PCIE_DBG(&msm_pcie_dev[rc_idx],
6148 "RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
6149 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
6150 else
6151 PCIE_DBG(&msm_pcie_dev[rc_idx],
6152 "RC%d: perst-delay-us-min: %dus.\n",
6153 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
6154
6155 msm_pcie_dev[rc_idx].perst_delay_us_max =
6156 PERST_PROPAGATION_DELAY_US_MAX;
6157 ret = of_property_read_u32(pdev->dev.of_node,
6158 "qcom,perst-delay-us-max",
6159 &msm_pcie_dev[rc_idx].perst_delay_us_max);
6160 if (ret)
6161 PCIE_DBG(&msm_pcie_dev[rc_idx],
6162 "RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
6163 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
6164 else
6165 PCIE_DBG(&msm_pcie_dev[rc_idx],
6166 "RC%d: perst-delay-us-max: %dus.\n",
6167 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
6168
6169 msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
6170 ret = of_property_read_u32(pdev->dev.of_node,
6171 "qcom,tlp-rd-size",
6172 &msm_pcie_dev[rc_idx].tlp_rd_size);
6173 if (ret)
6174 PCIE_DBG(&msm_pcie_dev[rc_idx],
6175 "RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
6176 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
6177 else
6178 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
6179 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
6180
6181 msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
6182 msm_pcie_dev[rc_idx].msi_gicm_base = 0;
6183 ret = of_property_read_u32((&pdev->dev)->of_node,
6184 "qcom,msi-gicm-addr",
6185 &msm_pcie_dev[rc_idx].msi_gicm_addr);
6186
6187 if (ret) {
6188 PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
6189 "msi-gicm-addr does not exist.\n");
6190 } else {
6191 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
6192 msm_pcie_dev[rc_idx].msi_gicm_addr);
6193
6194 ret = of_property_read_u32((&pdev->dev)->of_node,
6195 "qcom,msi-gicm-base",
6196 &msm_pcie_dev[rc_idx].msi_gicm_base);
6197
6198 if (ret) {
6199 PCIE_ERR(&msm_pcie_dev[rc_idx],
6200 "PCIe: RC%d: msi-gicm-base does not exist.\n",
6201 rc_idx);
6202 goto decrease_rc_num;
6203 } else {
6204 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
6205 msm_pcie_dev[rc_idx].msi_gicm_base);
6206 }
6207 }
6208
6209 msm_pcie_dev[rc_idx].scm_dev_id = 0;
6210 ret = of_property_read_u32((&pdev->dev)->of_node,
6211 "qcom,scm-dev-id",
6212 &msm_pcie_dev[rc_idx].scm_dev_id);
6213
6214 msm_pcie_dev[rc_idx].rc_idx = rc_idx;
6215 msm_pcie_dev[rc_idx].pdev = pdev;
6216 msm_pcie_dev[rc_idx].vreg_n = 0;
6217 msm_pcie_dev[rc_idx].gpio_n = 0;
6218 msm_pcie_dev[rc_idx].parf_deemph = 0;
6219 msm_pcie_dev[rc_idx].parf_swing = 0;
6220 msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
6221 msm_pcie_dev[rc_idx].user_suspend = false;
6222 msm_pcie_dev[rc_idx].disable_pc = false;
6223 msm_pcie_dev[rc_idx].saved_state = NULL;
6224 msm_pcie_dev[rc_idx].enumerated = false;
6225 msm_pcie_dev[rc_idx].num_active_ep = 0;
6226 msm_pcie_dev[rc_idx].num_ep = 0;
6227 msm_pcie_dev[rc_idx].pending_ep_reg = false;
6228 msm_pcie_dev[rc_idx].phy_len = 0;
6229 msm_pcie_dev[rc_idx].port_phy_len = 0;
6230 msm_pcie_dev[rc_idx].phy_sequence = NULL;
6231 msm_pcie_dev[rc_idx].port_phy_sequence = NULL;
6232 msm_pcie_dev[rc_idx].event_reg = NULL;
6233 msm_pcie_dev[rc_idx].linkdown_counter = 0;
6234 msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
6235 msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
6236 msm_pcie_dev[rc_idx].rc_corr_counter = 0;
6237 msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
6238 msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
6239 msm_pcie_dev[rc_idx].ep_corr_counter = 0;
6240 msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
6241 msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
6242 msm_pcie_dev[rc_idx].suspending = false;
6243 msm_pcie_dev[rc_idx].wake_counter = 0;
6244 msm_pcie_dev[rc_idx].aer_enable = true;
6245 msm_pcie_dev[rc_idx].power_on = false;
6246 msm_pcie_dev[rc_idx].current_short_bdf = 0;
6247 msm_pcie_dev[rc_idx].use_msi = false;
6248 msm_pcie_dev[rc_idx].use_pinctrl = false;
6249 msm_pcie_dev[rc_idx].linkdown_panic = false;
6250 msm_pcie_dev[rc_idx].bridge_found = false;
6251 memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
6252 sizeof(msm_pcie_vreg_info));
6253 memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
6254 sizeof(msm_pcie_gpio_info));
6255 memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
6256 sizeof(msm_pcie_clk_info[rc_idx]));
6257 memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
6258 sizeof(msm_pcie_pipe_clk_info[rc_idx]));
6259 memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
6260 sizeof(msm_pcie_res_info));
6261 memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
6262 sizeof(msm_pcie_irq_info));
6263 memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
6264 sizeof(msm_pcie_msi_info));
6265 memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
6266 sizeof(msm_pcie_reset_info[rc_idx]));
6267 memcpy(msm_pcie_dev[rc_idx].pipe_reset,
6268 msm_pcie_pipe_reset_info[rc_idx],
6269 sizeof(msm_pcie_pipe_reset_info[rc_idx]));
6270 msm_pcie_dev[rc_idx].shadow_en = true;
6271 for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
6272 msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
6273 for (i = 0; i < MAX_DEVICE_NUM; i++)
6274 for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
6275 msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
6276 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6277 msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
6278 msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
6279 msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
6280 msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
6281 msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
6282 msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = 0;
6283 msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
6284 msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
6285 msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
6286 msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
6287 }
6288
6289 ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
6290 msm_pcie_dev[rc_idx].pdev);
6291
6292 if (ret)
6293 goto decrease_rc_num;
6294
6295 msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
6296 if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
6297 PCIE_ERR(&msm_pcie_dev[rc_idx],
6298 "PCIe: RC%d failed to get pinctrl\n",
6299 rc_idx);
6300 else
6301 msm_pcie_dev[rc_idx].use_pinctrl = true;
6302
6303 if (msm_pcie_dev[rc_idx].use_pinctrl) {
6304 msm_pcie_dev[rc_idx].pins_default =
6305 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6306 "default");
6307 if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
6308 PCIE_ERR(&msm_pcie_dev[rc_idx],
6309 "PCIe: RC%d could not get pinctrl default state\n",
6310 rc_idx);
6311 msm_pcie_dev[rc_idx].pins_default = NULL;
6312 }
6313
6314 msm_pcie_dev[rc_idx].pins_sleep =
6315 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6316 "sleep");
6317 if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
6318 PCIE_ERR(&msm_pcie_dev[rc_idx],
6319 "PCIe: RC%d could not get pinctrl sleep state\n",
6320 rc_idx);
6321 msm_pcie_dev[rc_idx].pins_sleep = NULL;
6322 }
6323 }
6324
6325 ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
6326 if (ret) {
6327 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6328 goto decrease_rc_num;
6329 }
6330
6331 ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
6332 if (ret) {
6333 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6334 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6335 goto decrease_rc_num;
6336 }
6337
6338 msm_pcie_dev[rc_idx].drv_ready = true;
6339
6340 if (msm_pcie_dev[rc_idx].ep_wakeirq) {
6341 PCIE_DBG(&msm_pcie_dev[rc_idx],
6342 "PCIe: RC%d will be enumerated upon WAKE signal from Endpoint.\n",
6343 rc_idx);
6344 mutex_unlock(&pcie_drv.drv_lock);
6345 return 0;
6346 }
6347
6348 ret = msm_pcie_enumerate(rc_idx);
6349
6350 if (ret)
6351 PCIE_ERR(&msm_pcie_dev[rc_idx],
6352 "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
6353 rc_idx);
6354 else
6355 PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n",
6356 rc_idx);
6357
6358 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n",
6359 dev_name(&(pdev->dev)));
6360
6361 mutex_unlock(&pcie_drv.drv_lock);
6362 return 0;
6363
6364decrease_rc_num:
6365 pcie_drv.rc_num--;
6366out:
6367 if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
6368 pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
6369 rc_idx);
6370 else
6371 PCIE_ERR(&msm_pcie_dev[rc_idx],
6372 "PCIe: Driver probe failed for RC%d:%d\n",
6373 rc_idx, ret);
6374
6375 mutex_unlock(&pcie_drv.drv_lock);
6376
6377 return ret;
6378}
6379
6380static int msm_pcie_remove(struct platform_device *pdev)
6381{
6382 int ret = 0;
6383 int rc_idx;
6384
6385 PCIE_GEN_DBG("PCIe:%s.\n", __func__);
6386
6387 mutex_lock(&pcie_drv.drv_lock);
6388
6389 ret = of_property_read_u32((&pdev->dev)->of_node,
6390 "cell-index", &rc_idx);
6391 if (ret) {
6392 pr_err("%s: Did not find RC index.\n", __func__);
6393 goto out;
6394 } else {
6395 pcie_drv.rc_num--;
6396 PCIE_GEN_DBG("%s: RC index is 0x%x.", __func__, rc_idx);
6397 }
6398
6399 msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
6400 msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
6401 msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
6402 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6403 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6404
6405out:
6406 mutex_unlock(&pcie_drv.drv_lock);
6407
6408 return ret;
6409}
6410
/* Device-tree match table: binds this driver to "qcom,pci-msm" nodes. */
static const struct of_device_id msm_pcie_match[] = {
	{	.compatible = "qcom,pci-msm",
	},
	{}
};
6416
/* Platform-driver glue; probe/remove run once per msm_pcie_match node. */
static struct platform_driver msm_pcie_driver = {
	.probe	= msm_pcie_probe,
	.remove	= msm_pcie_remove,
	.driver	= {
		.name		= "pci-msm",
		.owner		= THIS_MODULE,
		.of_match_table	= msm_pcie_match,
	},
};
6426
6427int __init pcie_init(void)
6428{
6429 int ret = 0, i;
6430 char rc_name[MAX_RC_NAME_LEN];
6431
6432 pr_alert("pcie:%s.\n", __func__);
6433
6434 pcie_drv.rc_num = 0;
6435 mutex_init(&pcie_drv.drv_lock);
6436 mutex_init(&com_phy_lock);
6437
6438 for (i = 0; i < MAX_RC_NUM; i++) {
6439 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
6440 msm_pcie_dev[i].ipc_log =
6441 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6442 if (msm_pcie_dev[i].ipc_log == NULL)
6443 pr_err("%s: unable to create IPC log context for %s\n",
6444 __func__, rc_name);
6445 else
6446 PCIE_DBG(&msm_pcie_dev[i],
6447 "PCIe IPC logging is enable for RC%d\n",
6448 i);
6449 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
6450 msm_pcie_dev[i].ipc_log_long =
6451 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6452 if (msm_pcie_dev[i].ipc_log_long == NULL)
6453 pr_err("%s: unable to create IPC log context for %s\n",
6454 __func__, rc_name);
6455 else
6456 PCIE_DBG(&msm_pcie_dev[i],
6457 "PCIe IPC logging %s is enable for RC%d\n",
6458 rc_name, i);
6459 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
6460 msm_pcie_dev[i].ipc_log_dump =
6461 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6462 if (msm_pcie_dev[i].ipc_log_dump == NULL)
6463 pr_err("%s: unable to create IPC log context for %s\n",
6464 __func__, rc_name);
6465 else
6466 PCIE_DBG(&msm_pcie_dev[i],
6467 "PCIe IPC logging %s is enable for RC%d\n",
6468 rc_name, i);
6469 spin_lock_init(&msm_pcie_dev[i].cfg_lock);
6470 msm_pcie_dev[i].cfg_access = true;
6471 mutex_init(&msm_pcie_dev[i].enumerate_lock);
6472 mutex_init(&msm_pcie_dev[i].setup_lock);
6473 mutex_init(&msm_pcie_dev[i].recovery_lock);
6474 spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
6475 spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
6476 spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
6477 spin_lock_init(&msm_pcie_dev[i].aer_lock);
6478 msm_pcie_dev[i].drv_ready = false;
6479 }
6480 for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
6481 msm_pcie_dev_tbl[i].bdf = 0;
6482 msm_pcie_dev_tbl[i].dev = NULL;
6483 msm_pcie_dev_tbl[i].short_bdf = 0;
6484 msm_pcie_dev_tbl[i].sid = 0;
6485 msm_pcie_dev_tbl[i].domain = -1;
6486 msm_pcie_dev_tbl[i].conf_base = 0;
6487 msm_pcie_dev_tbl[i].phy_address = 0;
6488 msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
6489 msm_pcie_dev_tbl[i].event_reg = NULL;
6490 msm_pcie_dev_tbl[i].registered = true;
6491 }
6492
6493 msm_pcie_debugfs_init();
6494
6495 ret = platform_driver_register(&msm_pcie_driver);
6496
6497 return ret;
6498}
6499
/* Module teardown: unregister the platform driver, then remove debugfs. */
static void __exit pcie_exit(void)
{
	PCIE_GEN_DBG("pcie:%s.\n", __func__);

	platform_driver_unregister(&msm_pcie_driver);

	msm_pcie_debugfs_exit();
}
6508
/*
 * Register at the subsys_initcall_sync level so this driver initializes
 * after subsystem initcalls but before ordinary device/module initcalls
 * of client drivers that depend on the PCIe bus.
 */
subsys_initcall_sync(pcie_init);
module_exit(pcie_exit);
6511
6512
6513/* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
6514static void msm_pcie_fixup_early(struct pci_dev *dev)
6515{
6516 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6517
6518 PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
6519 if (dev->hdr_type == 1)
6520 dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
6521}
6522DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6523 msm_pcie_fixup_early);
6524
/*
 * Suspend the PCIe link.
 * Saves the endpoint config space (unless the caller opted out), blocks
 * further config access, requests link power-down via PME_Turn_Off, waits
 * for L23_Ready, then gates clocks and regulators.
 * Returns 0 on success or the pci_save_state() error code.
 */
static int msm_pcie_pm_suspend(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret = 0;
	u32 val = 0;
	int ret_l23;
	unsigned long irqsave_flags;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* Let the AER path know a suspend is in flight. */
	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
	pcie_dev->suspending = true;
	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);

	/* Nothing to do when the RC power was already removed. */
	if (!pcie_dev->power_on) {
		PCIE_DBG(pcie_dev,
			"PCIe: power of RC%d has been turned off.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	/*
	 * Save the EP's config space, but only when the caller did not
	 * pass MSM_PCIE_CONFIG_NO_CFG_RESTORE and the link is confirmed
	 * to be up.
	 */
	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
			pcie_dev->conf)) {
		ret = pci_save_state(dev);
		pcie_dev->saved_state = pci_store_saved_state(dev);
	}
	if (ret) {
		PCIE_ERR(pcie_dev, "PCIe: fail to save state of RC%d:%d.\n",
			pcie_dev->rc_idx, ret);
		pcie_dev->suspending = false;
		return ret;
	}

	/* Block config-space access while the link is going down. */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = false;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* Setting ELBI_SYS_CTRL bit 4 sends the PME_Turn_Off message. */
	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
				BIT(4));

	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
		pcie_dev->rc_idx);

	/* Poll PARF_PM_STTS bit 5 every 10ms, up to 100ms, for L23_Ready. */
	ret_l23 = readl_poll_timeout((pcie_dev->parf
		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);

	/* check L23_Ready */
	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
		pcie_dev->rc_idx,
		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
	if (!ret_l23)
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
			pcie_dev->rc_idx);

	/* Proceed with power-down regardless of the L23 poll result. */
	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	/* Park the pins in their low-power state, when available. */
	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_sleep);

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6597
/*
 * PCI core suspend-phase fixup: suspend the link unless power collapse
 * has been disabled by a user request (MSM_PCIE_DISABLE_PC).
 */
static void msm_pcie_fixup_suspend(struct pci_dev *dev)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);

	/* Only suspend a link that is currently up. */
	if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
		return;

	/* disable_pc is protected by cfg_lock; check it atomically. */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	if (pcie_dev->disable_pc) {
		PCIE_DBG(pcie_dev,
			"RC%d: Skip suspend because of user request\n",
			pcie_dev->rc_idx);
		spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
		return;
	}
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* Serialize with link recovery and user-driven PM operations. */
	mutex_lock(&pcie_dev->recovery_lock);

	ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
	if (ret)
		PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
			pcie_dev->rc_idx, ret);

	mutex_unlock(&pcie_dev->recovery_lock);
}
DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
			  msm_pcie_fixup_suspend);
6632
/*
 * Resume the PCIe link.
 * Restores the default pin state, re-allows config access, re-enables
 * clocks/regulators and the link, then restores the config space saved
 * at suspend (unless the caller opted out via
 * MSM_PCIE_CONFIG_NO_CFG_RESTORE).
 * Returns 0 on success or the msm_pcie_enable() error code.
 */
static int msm_pcie_pm_resume(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* Restore the active pin configuration before powering up. */
	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_default);

	/* Re-allow config-space access. */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = true;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
	if (ret) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d fail to enable PCIe link in resume.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	pcie_dev->suspending = false;
	PCIE_DBG(pcie_dev,
		"dev->bus->number = %d dev->bus->primary = %d\n",
		dev->bus->number, dev->bus->primary);

	/* Hand the state saved at suspend back to the PCI core. */
	if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCI framework restore state\n",
			pcie_dev->rc_idx);

		pci_load_and_free_saved_state(dev,
			&pcie_dev->saved_state);
		pci_restore_state(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCI framework restore state\n",
			pcie_dev->rc_idx);
	}

	/*
	 * bridge_found is set elsewhere during enumeration; when set,
	 * additionally replay the locally shadowed config space.
	 */
	if (pcie_dev->bridge_found) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCIe recover config\n",
			pcie_dev->rc_idx);

		msm_pcie_recover_config(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCIe recover config\n",
			pcie_dev->rc_idx);
	}

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6695
6696void msm_pcie_fixup_resume(struct pci_dev *dev)
6697{
6698 int ret;
6699 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6700
6701 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6702
6703 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6704 pcie_dev->user_suspend)
6705 return;
6706
6707 mutex_lock(&pcie_dev->recovery_lock);
6708 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6709 if (ret)
6710 PCIE_ERR(pcie_dev,
6711 "PCIe: RC%d got failure in fixup resume:%d.\n",
6712 pcie_dev->rc_idx, ret);
6713
6714 mutex_unlock(&pcie_dev->recovery_lock);
6715}
6716DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6717 msm_pcie_fixup_resume);
6718
6719void msm_pcie_fixup_resume_early(struct pci_dev *dev)
6720{
6721 int ret;
6722 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6723
6724 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6725
6726 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6727 pcie_dev->user_suspend)
6728 return;
6729
6730 mutex_lock(&pcie_dev->recovery_lock);
6731 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6732 if (ret)
6733 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
6734 pcie_dev->rc_idx, ret);
6735
6736 mutex_unlock(&pcie_dev->recovery_lock);
6737}
6738DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6739 msm_pcie_fixup_resume_early);
6740
/**
 * msm_pcie_pm_control() - client-driver entry point for link power control.
 * @pm_opt: requested operation: MSM_PCIE_SUSPEND, MSM_PCIE_RESUME,
 *          MSM_PCIE_DISABLE_PC or MSM_PCIE_ENABLE_PC.
 * @busnr: expected bus number of @user's endpoint; 0 skips the check.
 * @user: the client's struct pci_dev, used to locate the owning RC.
 * @data: opaque client data forwarded to suspend/resume.
 * @options: MSM_PCIE_CONFIG_* flags forwarded to suspend/resume.
 *
 * Return: 0 on success; -ENODEV, -EPROBE_DEFER or MSM_PCIE_ERROR on failure.
 */
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
			void *data, u32 options)
{
	int i, ret = 0;
	struct pci_dev *dev;
	u32 rc_idx = 0;
	struct msm_pcie_dev_t *pcie_dev;

	PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
		pm_opt, busnr, options);


	if (!user) {
		pr_err("PCIe: endpoint device is NULL\n");
		ret = -ENODEV;
		goto out;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);

	if (pcie_dev) {
		rc_idx = pcie_dev->rc_idx;
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
			rc_idx, pm_opt, busnr, options);
	} else {
		pr_err(
			"PCIe: did not find RC for pci endpoint device.\n"
			);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * When a bus number is supplied, cross-check it against the
	 * device-table entry for this EP (the bus number is bdf[31:24]).
	 */
	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (!busnr)
			break;
		if (user == pcie_dev->pcidev_table[i].dev) {
			if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
				break;

			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: bus number %d does not match with the expected value %d\n",
				pcie_dev->rc_idx, busnr,
				pcie_dev->pcidev_table[i].bdf >> 24);
			ret = MSM_PCIE_ERROR;
			goto out;
		}
	}

	/* Loop fell through without finding the EP in the table. */
	if (i == MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: endpoint device was not found in device table",
			pcie_dev->rc_idx);
		ret = MSM_PCIE_ERROR;
		goto out;
	}

	dev = msm_pcie_dev[rc_idx].dev;

	/* Ask the caller to retry after probe completes. */
	if (!msm_pcie_dev[rc_idx].drv_ready) {
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"RC%d has not been successfully probed yet\n",
			rc_idx);
		return -EPROBE_DEFER;
	}

	switch (pm_opt) {
	case MSM_PCIE_SUSPEND:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to suspend the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);

		if (!msm_pcie_dev[rc_idx].power_on) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);
			break;
		}

		/* Refuse while endpoint registrations are still pending. */
		if (msm_pcie_dev[rc_idx].pending_ep_reg) {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: request to suspend the link is rejected\n",
				rc_idx);
			break;
		}

		/* Only the last active EP may take the shared link down. */
		if (pcie_dev->num_active_ep) {
			PCIE_DBG(pcie_dev,
				"RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
				pcie_dev->rc_idx, pcie_dev->num_active_ep);
			return ret;
		}

		msm_pcie_dev[rc_idx].user_suspend = true;

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);

		ret = msm_pcie_pm_suspend(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to suspend the link.\n",
				rc_idx);
			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
		break;
	case MSM_PCIE_RESUME:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to resume the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status !=
					MSM_PCIE_LINK_DISABLED) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status,
				msm_pcie_dev[rc_idx].num_active_ep);
			break;
		}

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
		ret = msm_pcie_pm_resume(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to resume the link.\n",
				rc_idx);
		} else {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user succeeded to resume the link.\n",
				rc_idx);

			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);

		break;
	case MSM_PCIE_DISABLE_PC:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to keep the link always alive.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		/* Too late to honor the request once a suspend started. */
		if (msm_pcie_dev[rc_idx].suspending) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d Link has been suspended before request\n",
				rc_idx);
			ret = MSM_PCIE_ERROR;
		} else {
			msm_pcie_dev[rc_idx].disable_pc = true;
		}
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	case MSM_PCIE_ENABLE_PC:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d cancels the request of alive link.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		msm_pcie_dev[rc_idx].disable_pc = false;
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	default:
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"PCIe: RC%d: unsupported pm operation:%d.\n",
			rc_idx, pm_opt);
		ret = -ENODEV;
		goto out;
	}

out:
	return ret;
}
EXPORT_SYMBOL(msm_pcie_pm_control);
6919
/**
 * msm_pcie_register_event() - register a client for PCIe link events.
 * @reg: event registration descriptor; reg->user must be the client's
 *       struct pci_dev.
 *
 * For a multi-EP RC the registration is recorded per device-table slot
 * and the active-EP count is bumped for first-time registrants; for a
 * single-EP RC only the RC-wide event_reg pointer is set.
 *
 * Return: 0 on success, -ENODEV on invalid input or unknown RC.
 */
int msm_pcie_register_event(struct msm_pcie_register_event *reg)
{
	int i, ret = 0;
	struct msm_pcie_dev_t *pcie_dev;

	if (!reg) {
		pr_err("PCIe: Event registration is NULL\n");
		return -ENODEV;
	}

	if (!reg->user) {
		pr_err("PCIe: User of event registration is NULL\n");
		return -ENODEV;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);

	/*
	 * NOTE(review): PCIE_ERR is invoked with a NULL pcie_dev here;
	 * confirm the macro tolerates a NULL device argument.
	 */
	if (!pcie_dev) {
		PCIE_ERR(pcie_dev, "%s",
			"PCIe: did not find RC for pci endpoint device.\n");
		return -ENODEV;
	}

	if (pcie_dev->num_ep > 1) {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (reg->user ==
				pcie_dev->pcidev_table[i].dev) {
				pcie_dev->event_reg =
					pcie_dev->pcidev_table[i].event_reg;

				/* First registration for this slot. */
				if (!pcie_dev->event_reg) {
					pcie_dev->pcidev_table[i].registered =
						true;

					pcie_dev->num_active_ep++;
					PCIE_DBG(pcie_dev,
						"PCIe: RC%d: number of active EP(s): %d.\n",
						pcie_dev->rc_idx,
						pcie_dev->num_active_ep);
				}

				/* event_reg tracks the latest registrant. */
				pcie_dev->event_reg = reg;
				pcie_dev->pcidev_table[i].event_reg = reg;
				PCIE_DBG(pcie_dev,
					"Event 0x%x is registered for RC %d\n",
					reg->events,
					pcie_dev->rc_idx);

				break;
			}
		}

		/* Clear pending_ep_reg once every slot has registered. */
		if (pcie_dev->pending_ep_reg) {
			for (i = 0; i < MAX_DEVICE_NUM; i++)
				if (!pcie_dev->pcidev_table[i].registered)
					break;

			if (i == MAX_DEVICE_NUM)
				pcie_dev->pending_ep_reg = false;
		}
	} else {
		pcie_dev->event_reg = reg;
		PCIE_DBG(pcie_dev,
			"Event 0x%x is registered for RC %d\n", reg->events,
			pcie_dev->rc_idx);
	}

	return ret;
}
EXPORT_SYMBOL(msm_pcie_register_event);
6990
/**
 * msm_pcie_deregister_event() - remove a client's PCIe event registration.
 * @reg: the descriptor previously passed to msm_pcie_register_event();
 *       reg->user identifies the client's struct pci_dev.
 *
 * For a multi-EP RC the matching device-table slot is cleared and the
 * active-EP count decremented; for a single-EP RC only the RC-wide
 * event_reg pointer is cleared.
 *
 * Return: 0 on success, -ENODEV on invalid input or unknown RC.
 */
int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
{
	int i, ret = 0;
	struct msm_pcie_dev_t *pcie_dev;

	if (!reg) {
		pr_err("PCIe: Event deregistration is NULL\n");
		return -ENODEV;
	}

	if (!reg->user) {
		pr_err("PCIe: User of event deregistration is NULL\n");
		return -ENODEV;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);

	/*
	 * NOTE(review): PCIE_ERR is invoked with a NULL pcie_dev here;
	 * confirm the macro tolerates a NULL device argument.
	 */
	if (!pcie_dev) {
		PCIE_ERR(pcie_dev, "%s",
			"PCIe: did not find RC for pci endpoint device.\n");
		return -ENODEV;
	}

	if (pcie_dev->num_ep > 1) {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (reg->user == pcie_dev->pcidev_table[i].dev) {
				if (pcie_dev->pcidev_table[i].event_reg) {
					pcie_dev->num_active_ep--;
					PCIE_DBG(pcie_dev,
						"PCIe: RC%d: number of active EP(s) left: %d.\n",
						pcie_dev->rc_idx,
						pcie_dev->num_active_ep);
				}

				/*
				 * NOTE(review): the slot's "registered"
				 * flag is not reset here — verify whether
				 * that is intentional.
				 */
				pcie_dev->event_reg = NULL;
				pcie_dev->pcidev_table[i].event_reg = NULL;
				PCIE_DBG(pcie_dev,
					"Event is deregistered for RC %d\n",
					pcie_dev->rc_idx);

				break;
			}
		}
	} else {
		pcie_dev->event_reg = NULL;
		PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
				pcie_dev->rc_idx);
	}

	return ret;
}
EXPORT_SYMBOL(msm_pcie_deregister_event);
7043
7044int msm_pcie_recover_config(struct pci_dev *dev)
7045{
7046 int ret = 0;
7047 struct msm_pcie_dev_t *pcie_dev;
7048
7049 if (dev) {
7050 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
7051 PCIE_DBG(pcie_dev,
7052 "Recovery for the link of RC%d\n", pcie_dev->rc_idx);
7053 } else {
7054 pr_err("PCIe: the input pci dev is NULL.\n");
7055 return -ENODEV;
7056 }
7057
7058 if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
7059 PCIE_DBG(pcie_dev,
7060 "Recover config space of RC%d and its EP\n",
7061 pcie_dev->rc_idx);
7062 pcie_dev->shadow_en = false;
7063 PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
7064 msm_pcie_cfg_recover(pcie_dev, true);
7065 PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
7066 msm_pcie_cfg_recover(pcie_dev, false);
7067 PCIE_DBG(pcie_dev,
7068 "Refreshing the saved config space in PCI framework for RC%d and its EP\n",
7069 pcie_dev->rc_idx);
7070 pci_save_state(pcie_dev->dev);
7071 pci_save_state(dev);
7072 pcie_dev->shadow_en = true;
7073 PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
7074 pcie_dev->rc_idx);
7075 } else {
7076 PCIE_ERR(pcie_dev,
7077 "PCIe: the link of RC%d is not up yet; can't recover config space.\n",
7078 pcie_dev->rc_idx);
7079 ret = -ENODEV;
7080 }
7081
7082 return ret;
7083}
7084EXPORT_SYMBOL(msm_pcie_recover_config);
7085
7086int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
7087{
7088 int ret = 0;
7089 struct msm_pcie_dev_t *pcie_dev;
7090
7091 if (dev) {
7092 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
7093 PCIE_DBG(pcie_dev,
7094 "User requests to %s shadow\n",
7095 enable ? "enable" : "disable");
7096 } else {
7097 pr_err("PCIe: the input pci dev is NULL.\n");
7098 return -ENODEV;
7099 }
7100
7101 PCIE_DBG(pcie_dev,
7102 "The shadowing of RC%d is %s enabled currently.\n",
7103 pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not");
7104
7105 pcie_dev->shadow_en = enable;
7106
7107 PCIE_DBG(pcie_dev,
7108 "Shadowing of RC%d is turned %s upon user's request.\n",
7109 pcie_dev->rc_idx, enable ? "on" : "off");
7110
7111 return ret;
7112}
7113EXPORT_SYMBOL(msm_pcie_shadow_control);