blob: 6523cb082dc91c12c74a6235453307bcefe53e87 [file] [log] [blame]
Tony Truong349ee492014-10-01 17:35:56 -07001/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * MSM PCIe controller driver.
15 */
16
17#include <linux/module.h>
18#include <linux/bitops.h>
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
22#include <linux/gpio.h>
23#include <linux/iopoll.h>
24#include <linux/kernel.h>
25#include <linux/of_pci.h>
26#include <linux/pci.h>
Tony Truong52122a62017-03-23 18:00:34 -070027#include <linux/iommu.h>
Tony Truong349ee492014-10-01 17:35:56 -070028#include <linux/platform_device.h>
29#include <linux/regulator/consumer.h>
30#include <linux/regulator/rpm-smd-regulator.h>
31#include <linux/slab.h>
32#include <linux/types.h>
33#include <linux/of_gpio.h>
34#include <linux/clk/msm-clk.h>
35#include <linux/reset.h>
36#include <linux/msm-bus.h>
37#include <linux/msm-bus-board.h>
38#include <linux/debugfs.h>
39#include <linux/uaccess.h>
40#include <linux/io.h>
41#include <linux/msi.h>
42#include <linux/interrupt.h>
43#include <linux/irq.h>
44#include <linux/irqdomain.h>
45#include <linux/pm_wakeup.h>
46#include <linux/compiler.h>
47#include <soc/qcom/scm.h>
48#include <linux/ipc_logging.h>
49#include <linux/msm_pcie.h>
50
51#ifdef CONFIG_ARCH_MDMCALIFORNIUM
52#define PCIE_VENDOR_ID_RCP 0x17cb
53#define PCIE_DEVICE_ID_RCP 0x0302
54
55#define PCIE20_L1SUB_CONTROL1 0x158
56#define PCIE20_PARF_DBI_BASE_ADDR 0x350
57#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
58
59#define TX_BASE 0x200
60#define RX_BASE 0x400
61#define PCS_BASE 0x800
62#define PCS_MISC_BASE 0x600
63
64#elif defined(CONFIG_ARCH_MSM8998)
65#define PCIE_VENDOR_ID_RCP 0x17cb
66#define PCIE_DEVICE_ID_RCP 0x0105
67
68#define PCIE20_L1SUB_CONTROL1 0x1E4
69#define PCIE20_PARF_DBI_BASE_ADDR 0x350
70#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
71
72#define TX_BASE 0
73#define RX_BASE 0
74#define PCS_BASE 0x800
75#define PCS_MISC_BASE 0
76
77#else
78#define PCIE_VENDOR_ID_RCP 0x17cb
79#define PCIE_DEVICE_ID_RCP 0x0104
80
81#define PCIE20_L1SUB_CONTROL1 0x158
82#define PCIE20_PARF_DBI_BASE_ADDR 0x168
83#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
84
85#define TX_BASE 0x1000
86#define RX_BASE 0x1200
87#define PCS_BASE 0x1400
88#define PCS_MISC_BASE 0
89#endif
90
/*
 * Per-port serdes/PCS register block bases.
 * n: PHY/port index; m: ports-per-PHY multiplier.
 * Arguments are parenthesized so expression arguments expand correctly
 * (the original `n * m * 0x1000` broke for e.g. TX(i + 1, m)).
 * NOTE(review): the n * m stride formula looks unusual (gives 0 whenever
 * either index is 0) — confirm against the PHY memory map.
 */
#define TX(n, m) (TX_BASE + (n) * (m) * 0x1000)
#define RX(n, m) (RX_BASE + (n) * (m) * 0x1000)
#define PCS_PORT(n, m) (PCS_BASE + (n) * (m) * 0x1000)
#define PCS_MISC_PORT(n, m) (PCS_MISC_BASE + (n) * (m) * 0x1000)
95
96#define QSERDES_COM_BG_TIMER 0x00C
97#define QSERDES_COM_SSC_EN_CENTER 0x010
98#define QSERDES_COM_SSC_ADJ_PER1 0x014
99#define QSERDES_COM_SSC_ADJ_PER2 0x018
100#define QSERDES_COM_SSC_PER1 0x01C
101#define QSERDES_COM_SSC_PER2 0x020
102#define QSERDES_COM_SSC_STEP_SIZE1 0x024
103#define QSERDES_COM_SSC_STEP_SIZE2 0x028
104#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
105#define QSERDES_COM_CLK_ENABLE1 0x038
106#define QSERDES_COM_SYS_CLK_CTRL 0x03C
107#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
108#define QSERDES_COM_PLL_IVCO 0x048
109#define QSERDES_COM_LOCK_CMP1_MODE0 0x04C
110#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
111#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
112#define QSERDES_COM_BG_TRIM 0x070
113#define QSERDES_COM_CLK_EP_DIV 0x074
114#define QSERDES_COM_CP_CTRL_MODE0 0x078
115#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
116#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
117#define QSERDES_COM_SYSCLK_EN_SEL 0x0AC
118#define QSERDES_COM_RESETSM_CNTRL 0x0B4
119#define QSERDES_COM_RESTRIM_CTRL 0x0BC
120#define QSERDES_COM_RESCODE_DIV_NUM 0x0C4
121#define QSERDES_COM_LOCK_CMP_EN 0x0C8
122#define QSERDES_COM_DEC_START_MODE0 0x0D0
123#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0DC
124#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0E0
125#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0E4
126#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
127#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10C
128#define QSERDES_COM_VCO_TUNE_CTRL 0x124
129#define QSERDES_COM_VCO_TUNE_MAP 0x128
130#define QSERDES_COM_VCO_TUNE1_MODE0 0x12C
131#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
132#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
133#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
134#define QSERDES_COM_BG_CTRL 0x170
135#define QSERDES_COM_CLK_SELECT 0x174
136#define QSERDES_COM_HSCLK_SEL 0x178
137#define QSERDES_COM_CORECLK_DIV 0x184
138#define QSERDES_COM_CORE_CLK_EN 0x18C
139#define QSERDES_COM_C_READY_STATUS 0x190
140#define QSERDES_COM_CMN_CONFIG 0x194
141#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19C
142#define QSERDES_COM_DEBUG_BUS0 0x1A0
143#define QSERDES_COM_DEBUG_BUS1 0x1A4
144#define QSERDES_COM_DEBUG_BUS2 0x1A8
145#define QSERDES_COM_DEBUG_BUS3 0x1AC
146#define QSERDES_COM_DEBUG_BUS_SEL 0x1B0
147
148#define QSERDES_TX_N_RES_CODE_LANE_OFFSET(n, m) (TX(n, m) + 0x4C)
149#define QSERDES_TX_N_DEBUG_BUS_SEL(n, m) (TX(n, m) + 0x64)
150#define QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(n, m) (TX(n, m) + 0x68)
151#define QSERDES_TX_N_LANE_MODE(n, m) (TX(n, m) + 0x94)
152#define QSERDES_TX_N_RCV_DETECT_LVL_2(n, m) (TX(n, m) + 0xAC)
153
154#define QSERDES_RX_N_UCDR_SO_GAIN_HALF(n, m) (RX(n, m) + 0x010)
155#define QSERDES_RX_N_UCDR_SO_GAIN(n, m) (RX(n, m) + 0x01C)
156#define QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(n, m) (RX(n, m) + 0x048)
157#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(n, m) (RX(n, m) + 0x0D8)
158#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(n, m) (RX(n, m) + 0x0DC)
159#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(n, m) (RX(n, m) + 0x0E0)
160#define QSERDES_RX_N_SIGDET_ENABLES(n, m) (RX(n, m) + 0x110)
161#define QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(n, m) (RX(n, m) + 0x11C)
162#define QSERDES_RX_N_SIGDET_LVL(n, m) (RX(n, m) + 0x118)
163#define QSERDES_RX_N_RX_BAND(n, m) (RX(n, m) + 0x120)
164
165#define PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x00)
166#define PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x04)
167#define PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x08)
168#define PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x0C)
169#define PCIE_MISC_N_DEBUG_BUS_0_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x14)
170#define PCIE_MISC_N_DEBUG_BUS_1_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x18)
171#define PCIE_MISC_N_DEBUG_BUS_2_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x1C)
172#define PCIE_MISC_N_DEBUG_BUS_3_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x20)
173
174#define PCIE_N_SW_RESET(n, m) (PCS_PORT(n, m) + 0x00)
175#define PCIE_N_POWER_DOWN_CONTROL(n, m) (PCS_PORT(n, m) + 0x04)
176#define PCIE_N_START_CONTROL(n, m) (PCS_PORT(n, m) + 0x08)
177#define PCIE_N_TXDEEMPH_M6DB_V0(n, m) (PCS_PORT(n, m) + 0x24)
178#define PCIE_N_TXDEEMPH_M3P5DB_V0(n, m) (PCS_PORT(n, m) + 0x28)
179#define PCIE_N_ENDPOINT_REFCLK_DRIVE(n, m) (PCS_PORT(n, m) + 0x54)
180#define PCIE_N_RX_IDLE_DTCT_CNTRL(n, m) (PCS_PORT(n, m) + 0x58)
181#define PCIE_N_POWER_STATE_CONFIG1(n, m) (PCS_PORT(n, m) + 0x60)
182#define PCIE_N_POWER_STATE_CONFIG4(n, m) (PCS_PORT(n, m) + 0x6C)
183#define PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA0)
184#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA4)
185#define PCIE_N_PLL_LOCK_CHK_DLY_TIME(n, m) (PCS_PORT(n, m) + 0xA8)
186#define PCIE_N_TEST_CONTROL4(n, m) (PCS_PORT(n, m) + 0x11C)
187#define PCIE_N_TEST_CONTROL5(n, m) (PCS_PORT(n, m) + 0x120)
188#define PCIE_N_TEST_CONTROL6(n, m) (PCS_PORT(n, m) + 0x124)
189#define PCIE_N_TEST_CONTROL7(n, m) (PCS_PORT(n, m) + 0x128)
190#define PCIE_N_PCS_STATUS(n, m) (PCS_PORT(n, m) + 0x174)
191#define PCIE_N_DEBUG_BUS_0_STATUS(n, m) (PCS_PORT(n, m) + 0x198)
192#define PCIE_N_DEBUG_BUS_1_STATUS(n, m) (PCS_PORT(n, m) + 0x19C)
193#define PCIE_N_DEBUG_BUS_2_STATUS(n, m) (PCS_PORT(n, m) + 0x1A0)
194#define PCIE_N_DEBUG_BUS_3_STATUS(n, m) (PCS_PORT(n, m) + 0x1A4)
195#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1A8)
196#define PCIE_N_OSC_DTCT_ACTIONS(n, m) (PCS_PORT(n, m) + 0x1AC)
197#define PCIE_N_SIGDET_CNTRL(n, m) (PCS_PORT(n, m) + 0x1B0)
198#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(n, m) (PCS_PORT(n, m) + 0x1DC)
199#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1E0)
200
201#define PCIE_COM_SW_RESET 0x400
202#define PCIE_COM_POWER_DOWN_CONTROL 0x404
203#define PCIE_COM_START_CONTROL 0x408
204#define PCIE_COM_DEBUG_BUS_BYTE0_INDEX 0x438
205#define PCIE_COM_DEBUG_BUS_BYTE1_INDEX 0x43C
206#define PCIE_COM_DEBUG_BUS_BYTE2_INDEX 0x440
207#define PCIE_COM_DEBUG_BUS_BYTE3_INDEX 0x444
208#define PCIE_COM_PCS_READY_STATUS 0x448
209#define PCIE_COM_DEBUG_BUS_0_STATUS 0x45C
210#define PCIE_COM_DEBUG_BUS_1_STATUS 0x460
211#define PCIE_COM_DEBUG_BUS_2_STATUS 0x464
212#define PCIE_COM_DEBUG_BUS_3_STATUS 0x468
213
214#define PCIE20_PARF_SYS_CTRL 0x00
215#define PCIE20_PARF_PM_STTS 0x24
216#define PCIE20_PARF_PCS_DEEMPH 0x34
217#define PCIE20_PARF_PCS_SWING 0x38
218#define PCIE20_PARF_PHY_CTRL 0x40
219#define PCIE20_PARF_PHY_REFCLK 0x4C
220#define PCIE20_PARF_CONFIG_BITS 0x50
221#define PCIE20_PARF_TEST_BUS 0xE4
222#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
223#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8
224#define PCIE20_PARF_LTSSM 0x1B0
225#define PCIE20_PARF_INT_ALL_STATUS 0x224
226#define PCIE20_PARF_INT_ALL_CLEAR 0x228
227#define PCIE20_PARF_INT_ALL_MASK 0x22C
228#define PCIE20_PARF_SID_OFFSET 0x234
229#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
230#define PCIE20_PARF_BDF_TRANSLATE_N 0x250
231
232#define PCIE20_ELBI_VERSION 0x00
233#define PCIE20_ELBI_SYS_CTRL 0x04
234#define PCIE20_ELBI_SYS_STTS 0x08
235
236#define PCIE20_CAP 0x70
237#define PCIE20_CAP_DEVCTRLSTATUS (PCIE20_CAP + 0x08)
238#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
239
240#define PCIE20_COMMAND_STATUS 0x04
241#define PCIE20_HEADER_TYPE 0x0C
242#define PCIE20_BUSNUMBERS 0x18
243#define PCIE20_MEMORY_BASE_LIMIT 0x20
244#define PCIE20_BRIDGE_CTRL 0x3C
245#define PCIE20_DEVICE_CONTROL_STATUS 0x78
246#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
247
248#define PCIE20_AUX_CLK_FREQ_REG 0xB40
249#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C
250#define PCIE20_ACK_N_FTS 0xff00
251
252#define PCIE20_PLR_IATU_VIEWPORT 0x900
253#define PCIE20_PLR_IATU_CTRL1 0x904
254#define PCIE20_PLR_IATU_CTRL2 0x908
255#define PCIE20_PLR_IATU_LBAR 0x90C
256#define PCIE20_PLR_IATU_UBAR 0x910
257#define PCIE20_PLR_IATU_LAR 0x914
258#define PCIE20_PLR_IATU_LTAR 0x918
259#define PCIE20_PLR_IATU_UTAR 0x91c
260
261#define PCIE20_CTRL1_TYPE_CFG0 0x04
262#define PCIE20_CTRL1_TYPE_CFG1 0x05
263
264#define PCIE20_CAP_ID 0x10
265#define L1SUB_CAP_ID 0x1E
266
267#define PCIE_CAP_PTR_OFFSET 0x34
268#define PCIE_EXT_CAP_OFFSET 0x100
269
270#define PCIE20_AER_UNCORR_ERR_STATUS_REG 0x104
271#define PCIE20_AER_CORR_ERR_STATUS_REG 0x110
272#define PCIE20_AER_ROOT_ERR_STATUS_REG 0x130
273#define PCIE20_AER_ERR_SRC_ID_REG 0x134
274
275#define RD 0
276#define WR 1
277#define MSM_PCIE_ERROR -1
278
279#define PERST_PROPAGATION_DELAY_US_MIN 1000
280#define PERST_PROPAGATION_DELAY_US_MAX 1005
281#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
282#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
283#define LINK_UP_TIMEOUT_US_MIN 5000
284#define LINK_UP_TIMEOUT_US_MAX 5100
285#define LINK_UP_CHECK_MAX_COUNT 20
286#define PHY_STABILIZATION_DELAY_US_MIN 995
287#define PHY_STABILIZATION_DELAY_US_MAX 1005
288#define POWER_DOWN_DELAY_US_MIN 10
289#define POWER_DOWN_DELAY_US_MAX 11
290#define LINKDOWN_INIT_WAITING_US_MIN 995
291#define LINKDOWN_INIT_WAITING_US_MAX 1005
292#define LINKDOWN_WAITING_US_MIN 4900
293#define LINKDOWN_WAITING_US_MAX 5100
294#define LINKDOWN_WAITING_COUNT 200
295
296#define PHY_READY_TIMEOUT_COUNT 10
297#define XMLH_LINK_UP 0x400
298#define MAX_LINK_RETRIES 5
299#define MAX_BUS_NUM 3
300#define MAX_PROP_SIZE 32
301#define MAX_RC_NAME_LEN 15
302#define MSM_PCIE_MAX_VREG 4
303#define MSM_PCIE_MAX_CLK 9
304#define MSM_PCIE_MAX_PIPE_CLK 1
305#define MAX_RC_NUM 3
306#define MAX_DEVICE_NUM 20
307#define MAX_SHORT_BDF_NUM 16
308#define PCIE_TLP_RD_SIZE 0x5
309#define PCIE_MSI_NR_IRQS 256
310#define MSM_PCIE_MAX_MSI 32
311#define MAX_MSG_LEN 80
312#define PCIE_LOG_PAGES (50)
313#define PCIE_CONF_SPACE_DW 1024
314#define PCIE_CLEAR 0xDEADBEEF
315#define PCIE_LINK_DOWN 0xFFFFFFFF
316
317#define MSM_PCIE_MAX_RESET 4
318#define MSM_PCIE_MAX_PIPE_RESET 1
319
320#define MSM_PCIE_MSI_PHY 0xa0000000
321#define PCIE20_MSI_CTRL_ADDR (0x820)
322#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
323#define PCIE20_MSI_CTRL_INTR_EN (0x828)
324#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
325#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
326#define PCIE20_MSI_CTRL_MAX 8
327
328/* PM control options */
329#define PM_IRQ 0x1
330#define PM_CLK 0x2
331#define PM_GPIO 0x4
332#define PM_VREG 0x8
333#define PM_PIPE_CLK 0x10
334#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)
335
336#ifdef CONFIG_PHYS_ADDR_T_64BIT
337#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
338#else
339#define PCIE_UPPER_ADDR(addr) (0x0)
340#endif
341#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
342
/* Config Space Offsets */
/*
 * Encode bus number and devfn into the upper bits of a config-space
 * address: bus in bits [31:24], devfn in bits [23:16].
 * Arguments are parenthesized: without it, BDF_OFFSET(b & 0xff, d)
 * mis-expands because `<<` binds tighter than `&`.
 */
#define BDF_OFFSET(bus, devfn) \
	(((bus) << 24) | ((devfn) << 16))
346
/*
 * Logging helpers. Messages go to the per-device IPC log buffers when
 * available and/or to the console. The variadic part now uses "##arg"
 * (GNU extension, standard kernel idiom) so that calls with a bare
 * format string and no trailing arguments also compile; previously
 * `__func__, arg` required at least one argument after fmt.
 */
#define PCIE_GEN_DBG(x...) do { \
	if (msm_pcie_debug_mask) \
		pr_alert(x); \
	} while (0)

/* Verbose debug: "long" IPC log, normal IPC log, console if mask set. */
#define PCIE_DBG(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_long) \
		ipc_log_string((dev)->ipc_log_long, \
			"DBG1:%s: " fmt, __func__, ##arg); \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, ##arg); \
	if (msm_pcie_debug_mask) \
		pr_alert("%s: " fmt, __func__, ##arg); \
	} while (0)

#define PCIE_DBG2(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, ##arg);\
	if (msm_pcie_debug_mask) \
		pr_alert("%s: " fmt, __func__, ##arg); \
	} while (0)

#define PCIE_DBG3(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, ##arg);\
	if (msm_pcie_debug_mask) \
		pr_alert("%s: " fmt, __func__, ##arg); \
	} while (0)

/* Register-dump output: IPC dump log only, never the console. */
#define PCIE_DUMP(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_dump) \
		ipc_log_string((dev)->ipc_log_dump, \
			"DUMP:%s: " fmt, __func__, ##arg); \
	} while (0)

/* debugfs-triggered output: IPC dump log plus unconditional console. */
#define PCIE_DBG_FS(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_dump) \
		ipc_log_string((dev)->ipc_log_dump, \
			"DBG_FS:%s: " fmt, __func__, ##arg); \
	pr_alert("%s: " fmt, __func__, ##arg); \
	} while (0)

#define PCIE_INFO(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_long) \
		ipc_log_string((dev)->ipc_log_long, \
			"INFO:%s: " fmt, __func__, ##arg); \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, ##arg); \
	pr_info("%s: " fmt, __func__, ##arg); \
	} while (0)

#define PCIE_ERR(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_long) \
		ipc_log_string((dev)->ipc_log_long, \
			"ERR:%s: " fmt, __func__, ##arg); \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, ##arg); \
	pr_err("%s: " fmt, __func__, ##arg); \
	} while (0)
406
407
/*
 * Memory-mapped register/resource regions. Order must match
 * msm_pcie_res_info[], which is indexed by these values.
 */
enum msm_pcie_res {
	MSM_PCIE_RES_PARF,
	MSM_PCIE_RES_PHY,
	MSM_PCIE_RES_DM_CORE,
	MSM_PCIE_RES_ELBI,
	MSM_PCIE_RES_CONF,
	MSM_PCIE_RES_IO,
	MSM_PCIE_RES_BARS,
	MSM_PCIE_RES_TCSR,
	MSM_PCIE_MAX_RES,	/* count; sizes the res[] arrays */
};
419
/*
 * Interrupt lines. Order must match msm_pcie_irq_info[], which is
 * indexed by these values.
 */
enum msm_pcie_irq {
	MSM_PCIE_INT_MSI,
	MSM_PCIE_INT_A,
	MSM_PCIE_INT_B,
	MSM_PCIE_INT_C,
	MSM_PCIE_INT_D,
	MSM_PCIE_INT_PLS_PME,
	MSM_PCIE_INT_PME_LEGACY,
	MSM_PCIE_INT_PLS_ERR,
	MSM_PCIE_INT_AER_LEGACY,
	MSM_PCIE_INT_LINK_UP,
	MSM_PCIE_INT_LINK_DOWN,
	MSM_PCIE_INT_BRIDGE_FLUSH_N,
	MSM_PCIE_INT_GLOBAL_INT,
	MSM_PCIE_MAX_IRQ,	/* count; sizes the irq[] arrays */
};
436
/*
 * Global-interrupt event numbers, 1 through 29, with MAX one past the
 * last. NOTE(review): these look like bit positions in the
 * PCIE20_PARF_INT_ALL_STATUS/CLEAR/MASK registers — confirm against the
 * hardware documentation before relying on that mapping.
 */
enum msm_pcie_irq_event {
	MSM_PCIE_INT_EVT_LINK_DOWN = 1,
	MSM_PCIE_INT_EVT_BME,	/* Bus Master Enable */
	MSM_PCIE_INT_EVT_PM_TURNOFF,
	MSM_PCIE_INT_EVT_DEBUG,
	MSM_PCIE_INT_EVT_LTR,
	MSM_PCIE_INT_EVT_MHI_Q6,
	MSM_PCIE_INT_EVT_MHI_A7,
	MSM_PCIE_INT_EVT_DSTATE_CHANGE,
	MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
	MSM_PCIE_INT_EVT_MMIO_WRITE,
	MSM_PCIE_INT_EVT_CFG_WRITE,
	MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
	MSM_PCIE_INT_EVT_LINK_UP,
	MSM_PCIE_INT_EVT_AER_LEGACY,
	MSM_PCIE_INT_EVT_AER_ERR,
	MSM_PCIE_INT_EVT_PME_LEGACY,
	MSM_PCIE_INT_EVT_PLS_PME,
	MSM_PCIE_INT_EVT_INTD,
	MSM_PCIE_INT_EVT_INTC,
	MSM_PCIE_INT_EVT_INTB,
	MSM_PCIE_INT_EVT_INTA,
	MSM_PCIE_INT_EVT_EDMA,
	MSM_PCIE_INT_EVT_MSI_0,
	MSM_PCIE_INT_EVT_MSI_1,
	MSM_PCIE_INT_EVT_MSI_2,
	MSM_PCIE_INT_EVT_MSI_3,
	MSM_PCIE_INT_EVT_MSI_4,
	MSM_PCIE_INT_EVT_MSI_5,
	MSM_PCIE_INT_EVT_MSI_6,
	MSM_PCIE_INT_EVT_MSI_7,
	MSM_PCIE_INT_EVT_MAX = 30,
};
470
/* GPIOs owned by the driver; order matches msm_pcie_gpio_info[]. */
enum msm_pcie_gpio {
	MSM_PCIE_GPIO_PERST,	/* PERST# to the endpoint */
	MSM_PCIE_GPIO_WAKE,
	MSM_PCIE_GPIO_EP,
	MSM_PCIE_MAX_GPIO
};

/* Link state tracked per root complex (see link_status field). */
enum msm_pcie_link_status {
	MSM_PCIE_LINK_DEINIT,
	MSM_PCIE_LINK_ENABLED,
	MSM_PCIE_LINK_DISABLED
};
483
/* Bit flags stored in msm_pcie_dev_t.boot_option. */
enum msm_pcie_boot_option {
	MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0),	/* don't enumerate at probe */
	MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)	/* don't enumerate on wake */
};
488
/* gpio info structure */
struct msm_pcie_gpio_info_t {
	char *name;		/* device-tree property name */
	uint32_t num;		/* GPIO number */
	bool out;		/* true if configured as output */
	uint32_t on;		/* value driven when enabled */
	uint32_t init;		/* initial output value */
	bool required;		/* must be present for the RC to work */
};

/* voltage regulator info structure */
struct msm_pcie_vreg_info_t {
	struct regulator *hdl;
	char *name;		/* supply name */
	uint32_t max_v;		/* note: max precedes min in this layout */
	uint32_t min_v;
	uint32_t opt_mode;	/* load for regulator_set_load-style tuning */
	bool required;
};

/* reset info structure */
struct msm_pcie_reset_info_t {
	struct reset_control *hdl;
	char *name;		/* reset line name */
	bool required;
};

/* clock info structure */
struct msm_pcie_clk_info_t {
	struct clk *hdl;
	char *name;		/* clock name */
	u32 freq;		/* rate in Hz; 0 = don't set a rate */
	bool config_mem;	/* apply NORETAIN mem/periph flags */
	bool required;
};

/* resource info structure */
struct msm_pcie_res_info_t {
	char *name;		/* platform resource name */
	struct resource *resource;
	void __iomem *base;	/* ioremapped base, filled at probe */
};

/* irq info structure */
struct msm_pcie_irq_info_t {
	char *name;		/* platform IRQ resource name */
	uint32_t num;		/* resolved IRQ number */
};

/* phy info structure: one register write in an init sequence */
struct msm_pcie_phy_info_t {
	u32 offset;		/* register offset from the PHY base */
	u32 val;		/* value to write */
	u32 delay;		/* post-write delay */
};

/* PCIe device info structure: one enumerated endpoint/bridge */
struct msm_pcie_device_info {
	u32 bdf;		/* encoded bus/devfn, see BDF_OFFSET() */
	struct pci_dev *dev;
	short short_bdf;
	u32 sid;		/* SMMU stream ID */
	int domain;
	void __iomem *conf_base;
	unsigned long phy_address;
	u32 dev_ctrlstts_offset;
	struct msm_pcie_register_event *event_reg;
	bool registered;
};
558
/* msm pcie device structure: all per-root-complex state */
struct msm_pcie_dev_t {
	struct platform_device *pdev;
	struct pci_dev *dev;
	struct regulator *gdsc;
	struct regulator *gdsc_smmu;
	struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
	struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
	struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK];
	struct msm_pcie_clk_info_t pipeclk[MSM_PCIE_MAX_PIPE_CLK];
	struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
	struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
	struct msm_pcie_irq_info_t msi[MSM_PCIE_MAX_MSI];
	struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
	struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];

	/* mapped register spaces; mirror enum msm_pcie_res */
	void __iomem *parf;
	void __iomem *phy;
	void __iomem *elbi;
	void __iomem *dm_core;
	void __iomem *conf;
	void __iomem *bars;
	void __iomem *tcsr;

	uint32_t axi_bar_start;
	uint32_t axi_bar_end;

	struct resource *dev_mem_res;
	struct resource *dev_io_res;

	uint32_t wake_n;
	uint32_t vreg_n;
	uint32_t gpio_n;
	uint32_t parf_deemph;
	uint32_t parf_swing;

	/* config-space access gating */
	bool cfg_access;
	spinlock_t cfg_lock;
	unsigned long irqsave_flags;
	struct mutex enumerate_lock;
	struct mutex setup_lock;

	/* MSI bookkeeping */
	struct irq_domain *irq_domain;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
	uint32_t msi_gicm_addr;
	uint32_t msi_gicm_base;
	bool use_msi;

	enum msm_pcie_link_status link_status;
	bool user_suspend;
	bool disable_pc;
	struct pci_saved_state *saved_state;

	struct wakeup_source ws;
	struct msm_bus_scale_pdata *bus_scale_table;
	uint32_t bus_client;

	/* capabilities and platform tunables (mostly from DT) */
	bool l0s_supported;
	bool l1_supported;
	bool l1ss_supported;
	bool common_clk_en;
	bool clk_power_manage_en;
	bool aux_clk_sync;
	bool aer_enable;
	bool smmu_exist;
	uint32_t smmu_sid_base;
	uint32_t n_fts;
	bool ext_ref_clk;
	bool common_phy;
	uint32_t ep_latency;
	uint32_t wr_halt_size;
	uint32_t cpl_timeout;
	uint32_t current_bdf;
	short current_short_bdf;
	uint32_t perst_delay_us_min;
	uint32_t perst_delay_us_max;
	uint32_t tlp_rd_size;
	bool linkdown_panic;
	uint32_t boot_option;	/* enum msm_pcie_boot_option bits */

	uint32_t rc_idx;	/* index into msm_pcie_dev[] */
	uint32_t phy_ver;
	bool drv_ready;
	bool enumerated;
	struct work_struct handle_wake_work;
	struct mutex recovery_lock;
	spinlock_t linkdown_lock;
	spinlock_t wakeup_lock;
	spinlock_t global_irq_lock;
	spinlock_t aer_lock;
	/* event/error statistics counters */
	ulong linkdown_counter;
	ulong link_turned_on_counter;
	ulong link_turned_off_counter;
	ulong rc_corr_counter;
	ulong rc_non_fatal_counter;
	ulong rc_fatal_counter;
	ulong ep_corr_counter;
	ulong ep_non_fatal_counter;
	ulong ep_fatal_counter;
	bool suspending;
	ulong wake_counter;
	u32 num_active_ep;
	u32 num_ep;
	bool pending_ep_reg;
	u32 phy_len;
	u32 port_phy_len;
	struct msm_pcie_phy_info_t *phy_sequence;
	struct msm_pcie_phy_info_t *port_phy_sequence;
	/* config-space shadow storage (use gated by shadow_en) */
	u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
	u32 rc_shadow[PCIE_CONF_SPACE_DW];
	bool shadow_en;
	bool bridge_found;
	struct msm_pcie_register_event *event_reg;
	unsigned int scm_dev_id;
	bool power_on;
	/* IPC logging contexts (see PCIE_DBG/PCIE_ERR macros) */
	void *ipc_log;
	void *ipc_log_long;
	void *ipc_log_dump;
	bool use_19p2mhz_aux_clk;
	bool use_pinctrl;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_sleep;
	struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM];
};
684
685
/* debug mask sys interface */
static int msm_pcie_debug_mask;
module_param_named(debug_mask, msm_pcie_debug_mask,
			int, 0644);

/* debugfs values (file-scope so debugfs handlers can share them) */
static u32 rc_sel;
static u32 base_sel;
static u32 wr_offset;
static u32 wr_mask;
static u32 wr_value;
static ulong corr_counter_limit = 5;

/* counter to keep track if common PHY needs to be configured */
static u32 num_rc_on;

/* global lock for PCIe common PHY */
static struct mutex com_phy_lock;

/* Table to track info of PCIe devices */
static struct msm_pcie_device_info
	msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];

/* PCIe driver state */
struct pcie_drv_sta {
	u32 rc_num;
	struct mutex drv_lock;	/* NOTE(review): presumably guards rc_num — confirm */
} pcie_drv;

/* msm pcie device data, one entry per root complex */
static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
717
/* regulators */
/*
 * Fields: hdl, name, max_v, min_v, opt_mode, required — note max_v
 * precedes min_v, matching struct msm_pcie_vreg_info_t's layout.
 * NOTE(review): "vreg-0.9" is given 1000000 (1.0 V) — confirm intended.
 */
static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
	{NULL, "vreg-3.3", 0, 0, 0, false},
	{NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
	{NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
	{NULL, "vreg-cx", 0, 0, 0, false}
};
725
726/* GPIOs */
727static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
728 {"perst-gpio", 0, 1, 0, 0, 1},
729 {"wake-gpio", 0, 0, 0, 0, 0},
730 {"qcom,ep-gpio", 0, 1, 1, 0, 0}
731};
732
733/* resets */
734static struct msm_pcie_reset_info_t
735msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
736 {
737 {NULL, "pcie_phy_reset", false},
738 {NULL, "pcie_phy_com_reset", false},
739 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
740 {NULL, "pcie_0_phy_reset", false}
741 },
742 {
743 {NULL, "pcie_phy_reset", false},
744 {NULL, "pcie_phy_com_reset", false},
745 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
746 {NULL, "pcie_1_phy_reset", false}
747 },
748 {
749 {NULL, "pcie_phy_reset", false},
750 {NULL, "pcie_phy_com_reset", false},
751 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
752 {NULL, "pcie_2_phy_reset", false}
753 }
754};
755
756/* pipe reset */
757static struct msm_pcie_reset_info_t
758msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
759 {
760 {NULL, "pcie_0_phy_pipe_reset", false}
761 },
762 {
763 {NULL, "pcie_1_phy_pipe_reset", false}
764 },
765 {
766 {NULL, "pcie_2_phy_pipe_reset", false}
767 }
768};
769
770/* clocks */
771static struct msm_pcie_clk_info_t
772 msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
773 {
774 {NULL, "pcie_0_ref_clk_src", 0, false, false},
775 {NULL, "pcie_0_aux_clk", 1010000, false, true},
776 {NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
777 {NULL, "pcie_0_mstr_axi_clk", 0, true, true},
778 {NULL, "pcie_0_slv_axi_clk", 0, true, true},
779 {NULL, "pcie_0_ldo", 0, false, true},
780 {NULL, "pcie_0_smmu_clk", 0, false, false},
781 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
782 {NULL, "pcie_phy_aux_clk", 0, false, false}
783 },
784 {
785 {NULL, "pcie_1_ref_clk_src", 0, false, false},
786 {NULL, "pcie_1_aux_clk", 1010000, false, true},
787 {NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
788 {NULL, "pcie_1_mstr_axi_clk", 0, true, true},
789 {NULL, "pcie_1_slv_axi_clk", 0, true, true},
790 {NULL, "pcie_1_ldo", 0, false, true},
791 {NULL, "pcie_1_smmu_clk", 0, false, false},
792 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
793 {NULL, "pcie_phy_aux_clk", 0, false, false}
794 },
795 {
796 {NULL, "pcie_2_ref_clk_src", 0, false, false},
797 {NULL, "pcie_2_aux_clk", 1010000, false, true},
798 {NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
799 {NULL, "pcie_2_mstr_axi_clk", 0, true, true},
800 {NULL, "pcie_2_slv_axi_clk", 0, true, true},
801 {NULL, "pcie_2_ldo", 0, false, true},
802 {NULL, "pcie_2_smmu_clk", 0, false, false},
803 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
804 {NULL, "pcie_phy_aux_clk", 0, false, false}
805 }
806};
807
808/* Pipe Clocks */
809static struct msm_pcie_clk_info_t
810 msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
811 {
812 {NULL, "pcie_0_pipe_clk", 125000000, true, true},
813 },
814 {
815 {NULL, "pcie_1_pipe_clk", 125000000, true, true},
816 },
817 {
818 {NULL, "pcie_2_pipe_clk", 125000000, true, true},
819 }
820};
821
822/* resources */
823static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
824 {"parf", 0, 0},
825 {"phy", 0, 0},
826 {"dm_core", 0, 0},
827 {"elbi", 0, 0},
828 {"conf", 0, 0},
829 {"io", 0, 0},
830 {"bars", 0, 0},
831 {"tcsr", 0, 0}
832};
833
834/* irqs */
835static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
836 {"int_msi", 0},
837 {"int_a", 0},
838 {"int_b", 0},
839 {"int_c", 0},
840 {"int_d", 0},
841 {"int_pls_pme", 0},
842 {"int_pme_legacy", 0},
843 {"int_pls_err", 0},
844 {"int_aer_legacy", 0},
845 {"int_pls_link_up", 0},
846 {"int_pls_link_down", 0},
847 {"int_bridge_flush_n", 0},
848 {"int_global_int", 0}
849};
850
851/* MSIs */
852static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
853 {"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
854 {"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
855 {"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
856 {"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
857 {"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
858 {"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
859 {"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
860 {"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
861};
862
#ifdef CONFIG_ARM
/* On ARM, bus->sysdata is a pci_sys_data whose private_data is our dev. */
#define PCIE_BUS_PRIV_DATA(bus) \
	(((struct pci_sys_data *)bus->sysdata)->private_data)

static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];

/* Fill the per-RC pci_sys_data and return it for use as bus sysdata. */
static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
	msm_pcie_sys_data[dev->rc_idx].private_data = dev;

	return &msm_pcie_sys_data[dev->rc_idx];
}

/* Map legacy INTx IRQs using the common swizzle and OF IRQ parsing. */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
}
#else
/* Without pci_sys_data, sysdata points directly at msm_pcie_dev_t. */
#define PCIE_BUS_PRIV_DATA(bus) \
	(struct msm_pcie_dev_t *)(bus->sysdata)

static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	return dev;
}

/* No IRQ fixup needed on non-ARM configurations. */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
}
#endif
894
/*
 * Write @value to the register at @base + @offset, then barrier so the
 * write is posted before any subsequent access. Statement order is
 * hardware-critical; do not reorder.
 */
static inline void msm_pcie_write_reg(void *base, u32 offset, u32 value)
{
	writel_relaxed(value, base + offset);
	/* ensure that changes propagated to the hardware */
	wmb();
}
901
902static inline void msm_pcie_write_reg_field(void *base, u32 offset,
903 const u32 mask, u32 val)
904{
905 u32 shift = find_first_bit((void *)&mask, 32);
906 u32 tmp = readl_relaxed(base + offset);
907
908 tmp &= ~mask; /* clear written bits */
909 val = tmp | (val << shift);
910 writel_relaxed(val, base + offset);
911 /* ensure that changes propagated to the hardware */
912 wmb();
913}
914
915static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
916 struct msm_pcie_clk_info_t *info)
917{
918 int ret;
919
920 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
921 if (ret)
922 PCIE_ERR(dev,
923 "PCIe: RC%d can't configure core memory for clk %s: %d.\n",
924 dev->rc_idx, info->name, ret);
925 else
926 PCIE_DBG2(dev,
927 "PCIe: RC%d configured core memory for clk %s.\n",
928 dev->rc_idx, info->name);
929
930 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
931 if (ret)
932 PCIE_ERR(dev,
933 "PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
934 dev->rc_idx, info->name, ret);
935 else
936 PCIE_DBG2(dev,
937 "PCIe: RC%d configured peripheral memory for clk %s.\n",
938 dev->rc_idx, info->name);
939}
940
941#if defined(CONFIG_ARCH_FSM9010)
942#define PCIE20_PARF_PHY_STTS 0x3c
943#define PCIE2_PHY_RESET_CTRL 0x44
944#define PCIE20_PARF_PHY_REFCLK_CTRL2 0xa0
945#define PCIE20_PARF_PHY_REFCLK_CTRL3 0xa4
946#define PCIE20_PARF_PCS_SWING_CTRL1 0x88
947#define PCIE20_PARF_PCS_SWING_CTRL2 0x8c
948#define PCIE20_PARF_PCS_DEEMPH1 0x74
949#define PCIE20_PARF_PCS_DEEMPH2 0x78
950#define PCIE20_PARF_PCS_DEEMPH3 0x7c
951#define PCIE20_PARF_CONFIGBITS 0x84
952#define PCIE20_PARF_PHY_CTRL3 0x94
953#define PCIE20_PARF_PCS_CTRL 0x80
954
955#define TX_AMP_VAL 127
956#define PHY_RX0_EQ_GEN1_VAL 0
957#define PHY_RX0_EQ_GEN2_VAL 4
958#define TX_DEEMPH_GEN1_VAL 24
959#define TX_DEEMPH_GEN2_3_5DB_VAL 24
960#define TX_DEEMPH_GEN2_6DB_VAL 34
961#define PHY_TX0_TERM_OFFST_VAL 0
962
/* Intentional no-op on FSM9010: no PHY debug-bus dump for this target. */
static inline void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
}
966
967static inline void pcie20_phy_reset(struct msm_pcie_dev_t *dev, uint32_t assert)
968{
969 msm_pcie_write_reg_field(dev->phy, PCIE2_PHY_RESET_CTRL,
970 BIT(0), (assert) ? 1 : 0);
971}
972
/*
 * Bring up the 28LP SNS PCIe PHY (100MHz reference clock).
 *
 * The PHY is held in software reset while SSP enable, Tx amplitude,
 * Tx de-emphasis, Rx equalization, Tx termination offset and the
 * reference-clock source are programmed; the reset is then released.
 * Each field is written only if it does not already hold the target
 * value, to avoid redundant MMIO writes.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d: Initializing 28LP SNS phy - 100MHz\n",
		dev->rc_idx);

	/* Assert Phy SW Reset while reprogramming the PHY */
	pcie20_phy_reset(dev, 1);

	/* Program SSP ENABLE */
	if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL2) & BIT(0))
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2,
			BIT(0), 0);
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL3) &
		BIT(0)) == 0)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL3,
			BIT(0), 1);
	/* Program Tx Amplitude */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL1) &
		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_AMP_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL1,
			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_AMP_VAL);
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL2) &
		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_AMP_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL2,
			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_AMP_VAL);
	/* Program De-Emphasis */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH1) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_DEEMPH_GEN2_6DB_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH1,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN2_6DB_VAL);

	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH2) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_DEEMPH_GEN2_3_5DB_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH2,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN2_3_5DB_VAL);

	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH3) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_DEEMPH_GEN1_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH3,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN1_VAL);

	/* Program Rx_Eq */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_CONFIGBITS) &
		(BIT(2)|BIT(1)|BIT(0))) != PHY_RX0_EQ_GEN1_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_CONFIGBITS,
			BIT(2)|BIT(1)|BIT(0), PHY_RX0_EQ_GEN1_VAL);

	/* Program Tx0_term_offset */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_CTRL3) &
		(BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		PHY_TX0_TERM_OFFST_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_CTRL3,
			BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			PHY_TX0_TERM_OFFST_VAL);

	/* Program REF_CLK source: BIT(1) selects external reference clock */
	msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2, BIT(1),
		(dev->ext_ref_clk) ? 1 : 0);
	/* disable Tx2Rx Loopback */
	if (readl_relaxed(dev->phy + PCIE20_PARF_PCS_CTRL) & BIT(1))
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_CTRL,
			BIT(1), 0);
	/* De-assert Phy SW Reset */
	pcie20_phy_reset(dev, 0);
}
1048
1049static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1050{
1051
1052 /* read PCIE20_PARF_PHY_STTS twice */
1053 readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS);
1054 if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS) & BIT(0))
1055 return false;
1056 else
1057 return true;
1058}
1059#else
/*
 * Program the four PHY test-control mux registers (TEST_CONTROL4..7)
 * with the given selector values, then log the selectors and the
 * resulting DEBUG_BUS_0..3 status registers to the dump log.
 */
static void pcie_phy_dump_test_cntrl(struct msm_pcie_dev_t *dev,
					u32 cntrl4_val, u32 cntrl5_val,
					u32 cntrl6_val, u32 cntrl7_val)
{
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL4(dev->rc_idx, dev->common_phy), cntrl4_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL5(dev->rc_idx, dev->common_phy), cntrl5_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL6(dev->rc_idx, dev->common_phy), cntrl6_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL7(dev->rc_idx, dev->common_phy), cntrl7_val);

	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL4: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL4(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL5: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL5(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL6: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL6(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL7: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL7(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_0_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_1_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_2_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_3_STATUS(dev->rc_idx,
				dev->common_phy)));
}
1114
/*
 * Dump the QMP PHY debug state for this RC to the dump log:
 * walk the PCS test bus, the per-lane QSERDES Tx debug bus, the
 * MISC debug bus (PHY versions 0x10..0x1F only), the QSERDES common
 * block debug bus, the common-PHY debug bus (when present), and
 * finally hex-dump the entire PHY register region.
 * Not supported on PHY versions >= 0x20.
 */
static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;
	u32 write_val;

	if (dev->phy_ver >= 0x20) {
		PCIE_DUMP(dev, "PCIe: RC%d PHY dump is not supported\n",
			dev->rc_idx);
		return;
	}

	PCIE_DUMP(dev, "PCIe: RC%d PHY testbus\n", dev->rc_idx);

	/* PCS test-bus selectors 0x18..0x23 */
	pcie_phy_dump_test_cntrl(dev, 0x18, 0x19, 0x1A, 0x1B);
	pcie_phy_dump_test_cntrl(dev, 0x1C, 0x1D, 0x1E, 0x1F);
	pcie_phy_dump_test_cntrl(dev, 0x20, 0x21, 0x22, 0x23);

	/* QSERDES Tx debug bus, selectors 1..3 */
	for (i = 0; i < 3; i++) {
		write_val = 0x1 + i;
		msm_pcie_write_reg(dev->phy,
			QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
				dev->common_phy), write_val);
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_TX_N_DEBUG_BUS_SEL: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
					dev->common_phy)));

		pcie_phy_dump_test_cntrl(dev, 0x30, 0x31, 0x32, 0x33);
	}

	/* reset the PCS test-bus selectors */
	pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);

	/* MISC debug bus is only present on PHY versions 0x10..0x1F */
	if (dev->phy_ver >= 0x10 && dev->phy_ver < 0x20) {
		pcie_phy_dump_test_cntrl(dev, 0x01, 0x02, 0x03, 0x0A);
		pcie_phy_dump_test_cntrl(dev, 0x0E, 0x0F, 0x12, 0x13);
		pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);

		for (i = 0; i < 8; i += 4) {
			write_val = 0x1 + i;
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(dev->rc_idx,
					dev->common_phy), write_val);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 1);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 2);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 3);

			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_0_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_0_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_1_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_1_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_2_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_2_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_3_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_3_STATUS(
						dev->rc_idx, dev->common_phy)));
		}

		/* clear the MISC debug bus byte-index selectors */
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
				dev->rc_idx, dev->common_phy), 0);
	}

	/* QSERDES common block debug bus, selectors 2..3 */
	for (i = 0; i < 2; i++) {
		write_val = 0x2 + i;

		msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL,
			write_val);

		PCIE_DUMP(dev,
			"PCIe: RC%d to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS0: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS1: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS2: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS3: 0x%x\n\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3));
	}

	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0);

	/* common (shared) PHY debug bus, when this target has one */
	if (dev->common_phy) {
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
			0x01);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE1_INDEX,
			0x02);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE2_INDEX,
			0x03);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE3_INDEX,
			0x04);

		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE1_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE2_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE3_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_0_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_1_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_1_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_2_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_2_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_3_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_3_STATUS));

		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
			0x05);

		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_0_STATUS));
	}

	/* raw hex dump of the full PHY register space, 32 bytes per line */
	size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->phy + i),
			readl_relaxed(dev->phy + (i + 4)),
			readl_relaxed(dev->phy + (i + 8)),
			readl_relaxed(dev->phy + (i + 12)),
			readl_relaxed(dev->phy + (i + 16)),
			readl_relaxed(dev->phy + (i + 20)),
			readl_relaxed(dev->phy + (i + 24)),
			readl_relaxed(dev->phy + (i + 28)));
	}
}
1344
1345#ifdef CONFIG_ARCH_MDMCALIFORNIUM
/*
 * Bring up the MDM 14nm QMP PCIe PHY (19.2MHz refclk, common-mode
 * clock, SSC enabled): hold the PCS in SW reset, power up the PHY,
 * program the QSERDES common/Tx/Rx blocks and the PCS timing and
 * de-emphasis registers, then release the reset and start the PHY.
 * Register values are the target-specific tuning sequence; their
 * order is significant.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	u8 common_phy;

	PCIE_DBG(dev,
		"RC%d: Initializing MDM 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
		dev->rc_idx);

	if (dev->common_phy)
		common_phy = 1;
	else
		common_phy = 0;

	/* assert PCS SW reset and power up the PHY before programming */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x03);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy), 0x06);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);

	/* pick the sysclk source per the TCSR PHY clock scheme fuse/register */
	if (dev->tcsr) {
		PCIE_DBG(dev, "RC%d: TCSR PHY clock scheme is 0x%x\n",
			dev->rc_idx, readl_relaxed(dev->tcsr));

		if (readl_relaxed(dev->tcsr) & (BIT(1) | BIT(0)))
			msm_pcie_write_reg(dev->phy,
				QSERDES_COM_SYSCLK_EN_SEL, 0x0A);
		else
			msm_pcie_write_reg(dev->phy,
				QSERDES_COM_SYSCLK_EN_SEL, 0x04);
	}

	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x0D);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x04);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);

	/* spread-spectrum clocking (SSC) configuration */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
		common_phy), 0x45);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_RES_CODE_LANE_OFFSET(dev->rc_idx, common_phy),
		0x02);
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_RCV_DETECT_LVL_2(dev->rc_idx, common_phy),
		0x12);

	/* Rx signal detect and equalizer tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
		0x1C);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
		0x14);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
		0xDB);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
		common_phy),
		0x4B);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
		0x04);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);

	/* PCS wakeup/reset delay timing and de-emphasis levels */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_OSC_DTCT_ACTIONS(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
		0x73);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
		0x99);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TXDEEMPH_M6DB_V0(dev->rc_idx, common_phy),
		0x15);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
		0x0E);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_SIGDET_CNTRL(dev->rc_idx, common_phy),
		0x07);

	/* release the PCS SW reset and start the PHY */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
		0x03);
}
1506
/*
 * Intentional no-op on MDM targets: the full PHY (including per-port
 * PCS registers) is programmed in pcie_phy_init() above.
 */
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
}
1510
1511static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1512{
1513 if (readl_relaxed(dev->phy +
1514 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) & BIT(6))
1515 return false;
1516 else
1517 return true;
1518}
1519#else
/*
 * Bring up the 14nm QMP PCIe PHY (19.2MHz refclk, common-mode clock,
 * SSC enabled).  If a device-tree supplied register/value sequence
 * (dev->phy_sequence) is present it is replayed verbatim — honoring
 * per-entry delays — and nothing else is done.  Otherwise the
 * hard-coded QSERDES common block setup below is used; register
 * order is significant.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	int i;
	struct msm_pcie_phy_info_t *phy_seq;

	PCIE_DBG(dev,
		"RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
		dev->rc_idx);

	/* device-tree sequence, when given, overrides the defaults below */
	if (dev->phy_sequence) {
		i = dev->phy_len;
		phy_seq = dev->phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
		return;
	}

	if (dev->common_phy)
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0x01);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1C);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x42);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x04);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
	/* spread-spectrum clocking (SSC) configuration */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);

	/* PHY v3 overrides for two of the values programmed above */
	if (dev->phy_ver == 0x3) {
		msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
		msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x40);
	}

	/* shared PHY: release its SW reset and start it */
	if (dev->common_phy) {
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x00);
		msm_pcie_write_reg(dev->phy, PCIE_COM_START_CONTROL, 0x03);
	}
}
1601
/*
 * Per-port lane/PCS initialization for the 14nm QMP PHY.  Skipped
 * entirely on PHY versions >= 0x20.  A device-tree supplied
 * port_phy_sequence, when present, is replayed verbatim (with
 * per-entry delays) instead of the hard-coded values below.  Ends by
 * powering up the port, releasing its SW reset and starting it.
 */
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
	int i;
	struct msm_pcie_phy_info_t *phy_seq;
	u8 common_phy;

	if (dev->phy_ver >= 0x20)
		return;

	PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);

	if (dev->common_phy)
		common_phy = 1;
	else
		common_phy = 0;

	/* device-tree sequence, when given, overrides the defaults below */
	if (dev->port_phy_sequence) {
		i = dev->port_phy_len;
		phy_seq = dev->port_phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
		return;
	}

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
		common_phy), 0x45);
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy),
		0x06);

	/* Rx signal detect and equalizer tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
		0x1C);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
		0x17);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
		0xDB);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_BAND(dev->rc_idx, common_phy),
		0x18);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
		0x04);
	/* PCS idle-detect and wakeup/reset delay timing */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_RX_IDLE_DTCT_CNTRL(dev->rc_idx, common_phy),
		0x4C);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
		0x05);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
		common_phy), 0x4B);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
		0x14);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
		0x05);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x02);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_STATE_CONFIG4(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_STATE_CONFIG1(dev->rc_idx, common_phy),
		0xA3);

	/* PHY v3 overrides for signal-detect level and de-emphasis */
	if (dev->phy_ver == 0x3) {
		msm_pcie_write_reg(dev->phy,
			QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
			0x19);

		msm_pcie_write_reg(dev->phy,
			PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
			0x0E);
	}

	/* power up the port, then release reset and start it */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x03);
	usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
		0x0A);
}
1718
1719static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1720{
1721 if (dev->phy_ver >= 0x20) {
1722 if (readl_relaxed(dev->phy +
1723 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) &
1724 BIT(6))
1725 return false;
1726 else
1727 return true;
1728 }
1729
1730 if (!(readl_relaxed(dev->phy + PCIE_COM_PCS_READY_STATUS) & 0x1))
1731 return false;
1732 else
1733 return true;
1734}
1735#endif
1736#endif
1737
1738static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
1739{
1740 int ret, scm_ret;
1741
1742 if (!dev) {
1743 pr_err("PCIe: the input pcie dev is NULL.\n");
1744 return -ENODEV;
1745 }
1746
1747 ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
1748 if (ret || scm_ret) {
1749 PCIE_ERR(dev,
1750 "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
1751 dev->rc_idx, ret, scm_ret);
1752 return ret ? ret : -EINVAL;
1753 }
1754
1755 return 0;
1756}
1757
1758static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
1759 u32 offset)
1760{
1761 if (offset % 4) {
1762 PCIE_ERR(dev,
1763 "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
1764 dev->rc_idx, offset);
1765 return MSM_PCIE_ERROR;
1766 }
1767
1768 return 0;
1769}
1770
/*
 * Confirm that this RC's PCIe link is really up.
 *
 * @check_sw_stts: also require the driver's software link state to be
 *		   MSM_PCIE_LINK_ENABLED.
 * @check_ep: additionally read the endpoint's config header to prove
 *	      the EP is reachable.
 * @ep_conf: mapped EP config space; only dereferenced when @check_ep.
 *
 * Checks, in order: the optional software state, the link-up
 * indication (BIT(29) of the register at dm_core + 0x80), the RC's
 * device/vendor ID word, and optionally the EP's device/vendor ID
 * word.  A device/vendor ID reading of PCIE_LINK_DOWN means the
 * device is not accessible even though the link-up bit was set.
 *
 * Return: true only if every requested check passes.
 */
static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
						bool check_sw_stts,
						bool check_ep,
						void __iomem *ep_conf)
{
	u32 val;

	if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
			dev->rc_idx);
		return false;
	}

	if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
			dev->rc_idx);
		return false;
	}

	/* offset 0 of config space holds the device/vendor ID word */
	val = readl_relaxed(dev->dm_core);
	PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
		dev->rc_idx, val);
	if (val == PCIE_LINK_DOWN) {
		PCIE_ERR(dev,
			"PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
			dev->rc_idx, dev->rc_idx, val);
		return false;
	}

	if (check_ep) {
		val = readl_relaxed(ep_conf);
		PCIE_DBG(dev,
			"PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
			dev->rc_idx, val);
		if (val == PCIE_LINK_DOWN) {
			PCIE_ERR(dev,
				"PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
				dev->rc_idx, dev->rc_idx, val);
			return false;
		}
	}

	return true;
}
1815
/*
 * Restore shadowed PCIe config space after a link recovery.
 *
 * @rc: true to restore only the root complex's own config space from
 *	dev->rc_shadow (single pass); false to walk pcidev_table and
 *	restore each endpoint from dev->ep_shadow[i], skipping EPs
 *	whose link is not confirmed up.
 *
 * Shadow dwords holding PCIE_CLEAR are treated as never-captured and
 * skipped.  Registers are written back in reverse dword order.  For
 * endpoints, the kernel's own copy of the config state is refreshed
 * via pci_save_state() and @cfg advances SZ_4K to the next EP's
 * config space.
 */
static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
{
	int i, j;
	u32 val = 0;
	u32 *shadow;
	void *cfg = dev->conf;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		/* EP table is packed; first zero bdf marks the end */
		if (!rc && !dev->pcidev_table[i].bdf)
			break;
		if (rc) {
			cfg = dev->dm_core;
			shadow = dev->rc_shadow;
		} else {
			if (!msm_pcie_confirm_linkup(dev, false, true,
				dev->pcidev_table[i].conf_base))
				continue;

			shadow = dev->ep_shadow[i];
			PCIE_DBG(dev,
				"PCIe Device: %02x:%02x.%01x\n",
				dev->pcidev_table[i].bdf >> 24,
				dev->pcidev_table[i].bdf >> 19 & 0x1f,
				dev->pcidev_table[i].bdf >> 16 & 0x07);
		}
		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
			val = shadow[j];
			if (val != PCIE_CLEAR) {
				PCIE_DBG3(dev,
					"PCIe: before recovery:cfg 0x%x:0x%x\n",
					j * 4, readl_relaxed(cfg + j * 4));
				PCIE_DBG3(dev,
					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
					j, j * 4, val);
				writel_relaxed(val, cfg + j * 4);
				/* ensure changes propagated to the hardware */
				wmb();
				PCIE_DBG3(dev,
					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
					j * 4, readl_relaxed(cfg + j * 4));
			}
		}
		if (rc)
			break;

		pci_save_state(dev->pcidev_table[i].dev);
		cfg += SZ_4K;
	}
}
1865
1866static void msm_pcie_write_mask(void __iomem *addr,
1867 uint32_t clear_mask, uint32_t set_mask)
1868{
1869 uint32_t val;
1870
1871 val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
1872 writel_relaxed(val, addr);
1873 wmb(); /* ensure data is written to hardware register */
1874}
1875
/*
 * Dump PARF state for debugging: step the PARF test-bus selector
 * (bits 23:16 of PARF_SYS_CTRL) through 0x01..0x1A logging the TEST_BUS
 * register for each, restore the original SYS_CTRL value, then hex-dump
 * the whole PARF register region 32 bytes per line.
 */
static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;
	u32 original;

	PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);

	original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
	for (i = 1; i <= 0x1A; i++) {
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
				0xFF0000, i << 16);
		PCIE_DUMP(dev,
			"RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
			readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
	}
	/* put SYS_CTRL back the way we found it */
	writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);

	PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);

	size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->parf + i),
			readl_relaxed(dev->parf + (i + 4)),
			readl_relaxed(dev->parf + (i + 8)),
			readl_relaxed(dev->parf + (i + 12)),
			readl_relaxed(dev->parf + (i + 16)),
			readl_relaxed(dev->parf + (i + 20)),
			readl_relaxed(dev->parf + (i + 24)),
			readl_relaxed(dev->parf + (i + 28)));
	}
}
1912
1913static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
1914{
1915 PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
1916 dev->rc_idx, dev->enumerated ? "" : "not");
1917 PCIE_DBG_FS(dev, "PCIe: link is %s\n",
1918 (dev->link_status == MSM_PCIE_LINK_ENABLED)
1919 ? "enabled" : "disabled");
1920 PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
1921 dev->cfg_access ? "" : "not");
1922 PCIE_DBG_FS(dev, "use_msi is %d\n",
1923 dev->use_msi);
1924 PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
1925 dev->use_pinctrl);
1926 PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
1927 dev->use_19p2mhz_aux_clk);
1928 PCIE_DBG_FS(dev, "user_suspend is %d\n",
1929 dev->user_suspend);
1930 PCIE_DBG_FS(dev, "num_ep: %d\n",
1931 dev->num_ep);
1932 PCIE_DBG_FS(dev, "num_active_ep: %d\n",
1933 dev->num_active_ep);
1934 PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
1935 dev->pending_ep_reg ? "true" : "false");
1936 PCIE_DBG_FS(dev, "phy_len is %d",
1937 dev->phy_len);
1938 PCIE_DBG_FS(dev, "port_phy_len is %d",
1939 dev->port_phy_len);
1940 PCIE_DBG_FS(dev, "disable_pc is %d",
1941 dev->disable_pc);
1942 PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
1943 dev->l0s_supported ? "" : "not");
1944 PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
1945 dev->l1_supported ? "" : "not");
1946 PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
1947 dev->l1ss_supported ? "" : "not");
1948 PCIE_DBG_FS(dev, "common_clk_en is %d\n",
1949 dev->common_clk_en);
1950 PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
1951 dev->clk_power_manage_en);
1952 PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
1953 dev->aux_clk_sync);
1954 PCIE_DBG_FS(dev, "AER is %s enable\n",
1955 dev->aer_enable ? "" : "not");
1956 PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
1957 dev->ext_ref_clk);
Tony Truong9f2c7722017-02-28 15:02:27 -08001958 PCIE_DBG_FS(dev, "boot_option is 0x%x\n",
1959 dev->boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07001960 PCIE_DBG_FS(dev, "phy_ver is %d\n",
1961 dev->phy_ver);
1962 PCIE_DBG_FS(dev, "drv_ready is %d\n",
1963 dev->drv_ready);
1964 PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
1965 dev->linkdown_panic);
1966 PCIE_DBG_FS(dev, "the link is %s suspending\n",
1967 dev->suspending ? "" : "not");
1968 PCIE_DBG_FS(dev, "shadow is %s enabled\n",
1969 dev->shadow_en ? "" : "not");
1970 PCIE_DBG_FS(dev, "the power of RC is %s on\n",
1971 dev->power_on ? "" : "not");
1972 PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
1973 dev->msi_gicm_addr);
1974 PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
1975 dev->msi_gicm_base);
1976 PCIE_DBG_FS(dev, "bus_client: %d\n",
1977 dev->bus_client);
1978 PCIE_DBG_FS(dev, "current short bdf: %d\n",
1979 dev->current_short_bdf);
1980 PCIE_DBG_FS(dev, "smmu does %s exist\n",
1981 dev->smmu_exist ? "" : "not");
1982 PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
1983 dev->smmu_sid_base);
1984 PCIE_DBG_FS(dev, "n_fts: %d\n",
1985 dev->n_fts);
1986 PCIE_DBG_FS(dev, "common_phy: %d\n",
1987 dev->common_phy);
1988 PCIE_DBG_FS(dev, "ep_latency: %dms\n",
1989 dev->ep_latency);
1990 PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
1991 dev->wr_halt_size);
1992 PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
1993 dev->cpl_timeout);
1994 PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
1995 dev->current_bdf);
1996 PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
1997 dev->perst_delay_us_min);
1998 PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
1999 dev->perst_delay_us_max);
2000 PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
2001 dev->tlp_rd_size);
2002 PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
2003 dev->rc_corr_counter);
2004 PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
2005 dev->rc_non_fatal_counter);
2006 PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
2007 dev->rc_fatal_counter);
2008 PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
2009 dev->ep_corr_counter);
2010 PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
2011 dev->ep_non_fatal_counter);
2012 PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
2013 dev->ep_fatal_counter);
2014 PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
2015 dev->linkdown_counter);
2016 PCIE_DBG_FS(dev, "wake_counter: %lu\n",
2017 dev->wake_counter);
2018 PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
2019 dev->link_turned_on_counter);
2020 PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
2021 dev->link_turned_off_counter);
2022}
2023
2024static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
2025{
2026 int i, j;
2027 u32 val = 0;
2028 u32 *shadow;
2029
2030 for (i = 0; i < MAX_DEVICE_NUM; i++) {
2031 if (!rc && !dev->pcidev_table[i].bdf)
2032 break;
2033 if (rc) {
2034 shadow = dev->rc_shadow;
2035 } else {
2036 shadow = dev->ep_shadow[i];
2037 PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
2038 dev->pcidev_table[i].bdf >> 24,
2039 dev->pcidev_table[i].bdf >> 19 & 0x1f,
2040 dev->pcidev_table[i].bdf >> 16 & 0x07);
2041 }
2042 for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
2043 val = shadow[j];
2044 if (val != PCIE_CLEAR) {
2045 PCIE_DBG_FS(dev,
2046 "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
2047 j, j * 4, val);
2048 }
2049 }
2050 if (rc)
2051 break;
2052 }
2053}
2054
/*
 * Run one debug testcase (0-13) against a root complex:
 *   0  show status            1  disable link
 *   2  enable link + recover  3  bounce link + recover
 *   4  dump shadow registers  5/6  disable/enable L0s
 *   7/8 disable/enable L1     9/10 disable/enable L1ss
 *   11 enumerate              12 masked register write
 *   13 dump a register space
 * Testcases 12/13 are parameterized by the file-scope base_sel /
 * wr_offset / wr_mask / wr_value controls set via debugfs or
 * msm_pcie_debug_info().
 */
static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
					u32 testcase)
{
	int ret, i;
	u32 base_sel_size = 0;
	u32 val = 0;
	u32 current_offset = 0;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	/*
	 * Testcases 5-10 poke the endpoint's PCIe capability: walk the
	 * EP's standard capability list to locate the PCIe capability and
	 * derive the Link Control/Status (+0x10) and Device Control 2 /
	 * Status 2 (+0x28) register offsets.
	 */
	if (testcase >= 5 && testcase <= 10) {
		current_offset =
			readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xff) == PCIE20_CAP_ID) {
				ep_link_ctrlstts_offset = current_offset +
								0x10;
				ep_dev_ctrl2stts2_offset = current_offset +
								0x28;
				break;
			}
			/* next-capability pointer is in bits [15:8] */
			current_offset = (val >> 8) & 0xff;
		}

		/*
		 * NOTE(review): if the capability is not found both offsets
		 * stay 0, yet testcases 5-8 below still write at
		 * dev->conf + 0 - confirm this is intentional.
		 */
		if (!ep_link_ctrlstts_offset)
			PCIE_DBG(dev,
				"RC%d endpoint does not support PCIe capability registers\n",
				dev->rc_idx);
		else
			PCIE_DBG(dev,
				"RC%d: ep_link_ctrlstts_offset: 0x%x\n",
				dev->rc_idx, ep_link_ctrlstts_offset);
	}

	switch (testcase) {
	case 0: /* output status */
		PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
			dev->rc_idx);
		msm_pcie_show_status(dev);
		break;
	case 1: /* disable link */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
				__func__);
		break;
	case 2: /* enable link and recover config space for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: enable link and recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 3: /*
		 * disable and enable link, recover config space for
		 * RC and EP
		 */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 4: /* dump shadow registers for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping RC shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, true);

		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping EP shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, false);
		break;
	case 5: /* disable L0s */
		/* clear ASPM L0s enable (bit 0) on both RC and EP */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(0), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(0), 0);
		/* keep the suspend/resume shadow in sync with hardware */
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 6: /* enable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(0));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 7: /* disable L1 */
		/* clear ASPM L1 enable (bit 1) on both RC and EP */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(1), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(1), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 8: /* enable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(1));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 9: /* disable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
			dev->rc_idx);
		/*
		 * Walk the EP's extended capability list (next pointer in
		 * bits [31:20]) for the L1 substates capability; its
		 * Control 1 register sits at +0x8.
		 */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* clear the four L1ss enable bits and LTR (bit 10) */
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_L1SUB_CONTROL1,
					0xf, 0);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2,
				BIT(10), 0);
		msm_pcie_write_reg_field(dev->conf,
					ep_l1sub_ctrl1_offset,
					0xf, 0);
		msm_pcie_write_mask(dev->conf +
				ep_dev_ctrl2stts2_offset,
				BIT(10), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 10: /* enable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
			dev->rc_idx);
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset =
						current_offset + 0x4;
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		val = readl_relaxed(dev->conf +
				ep_l1sub_cap_reg1_offset);

		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
			val);
		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* enable only the substates the EP advertises support for */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_L1SUB_CONTROL1,
					0xf, val);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2,
				0, BIT(10));
		msm_pcie_write_reg_field(dev->conf,
					ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf +
				ep_dev_ctrl2stts2_offset,
				0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 11: /* enumerate PCIe */
		PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
			dev->rc_idx);
		if (dev->enumerated)
			PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
				dev->rc_idx);
		else {
			if (!msm_pcie_enumerate(dev->rc_idx))
				PCIE_DBG_FS(dev,
					"PCIe: RC%d is successfully enumerated\n",
					dev->rc_idx);
			else
				PCIE_DBG_FS(dev,
					"PCIe: RC%d enumeration failed\n",
					dev->rc_idx);
		}
		break;
	case 12: /* write a value to a register */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: writing a value to a register\n\n",
			dev->rc_idx);

		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		}

		PCIE_DBG_FS(dev,
			"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
			dev->res[base_sel - 1].name,
			dev->res[base_sel - 1].base,
			wr_offset, wr_mask, wr_value);

		base_sel_size = resource_size(dev->res[base_sel - 1].resource);

		/* reject offsets past the end of the region or misaligned */
		if (wr_offset > base_sel_size - 4 ||
			msm_pcie_check_align(dev, wr_offset))
			PCIE_DBG_FS(dev,
				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
				dev->rc_idx, wr_offset, base_sel_size - 4);
		else
			msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
				wr_offset, wr_mask, wr_value);

		break;
	case 13: /* dump all registers of base_sel */
		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
			pcie_parf_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
			pcie_phy_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
			/* only the first 4KB of config space is dumped */
			base_sel_size = 0x1000;
		} else {
			base_sel_size = resource_size(
				dev->res[base_sel - 1].resource);
		}

		PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
			dev->res[base_sel - 1].name, dev->rc_idx);

		for (i = 0; i < base_sel_size; i += 32) {
			PCIE_DBG_FS(dev,
			"0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			i, readl_relaxed(dev->res[base_sel - 1].base + i),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
		}
		break;
	default:
		PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
		break;
	}
}
2476
2477int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
2478 u32 offset, u32 mask, u32 value)
2479{
2480 int ret = 0;
2481 struct msm_pcie_dev_t *pdev = NULL;
2482
2483 if (!dev) {
2484 pr_err("PCIe: the input pci dev is NULL.\n");
2485 return -ENODEV;
2486 }
2487
2488 if (option == 12 || option == 13) {
2489 if (!base || base > 5) {
2490 PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
2491 PCIE_DBG_FS(pdev,
2492 "PCIe: base_sel is still 0x%x\n", base_sel);
2493 return -EINVAL;
2494 }
2495
2496 base_sel = base;
2497 PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
2498
2499 if (option == 12) {
2500 wr_offset = offset;
2501 wr_mask = mask;
2502 wr_value = value;
2503
2504 PCIE_DBG_FS(pdev,
2505 "PCIe: wr_offset is now 0x%x\n", wr_offset);
2506 PCIE_DBG_FS(pdev,
2507 "PCIe: wr_mask is now 0x%x\n", wr_mask);
2508 PCIE_DBG_FS(pdev,
2509 "PCIe: wr_value is now 0x%x\n", wr_value);
2510 }
2511 }
2512
2513 pdev = PCIE_BUS_PRIV_DATA(dev->bus);
2514 rc_sel = 1 << pdev->rc_idx;
2515
2516 msm_pcie_sel_debug_testcase(pdev, option);
2517
2518 return ret;
2519}
2520EXPORT_SYMBOL(msm_pcie_debug_info);
2521
Tony Truongbd9a3412017-02-27 18:30:13 -08002522#ifdef CONFIG_SYSFS
2523static ssize_t msm_pcie_enumerate_store(struct device *dev,
2524 struct device_attribute *attr,
2525 const char *buf, size_t count)
2526{
2527 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
2528 dev_get_drvdata(dev);
2529
2530 if (pcie_dev)
2531 msm_pcie_enumerate(pcie_dev->rc_idx);
2532
2533 return count;
2534}
2535
2536static DEVICE_ATTR(enumerate, 0200, NULL, msm_pcie_enumerate_store);
2537
2538static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
2539{
2540 int ret;
2541
2542 ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
2543 if (ret)
2544 PCIE_DBG_FS(dev,
2545 "RC%d: failed to create sysfs enumerate node\n",
2546 dev->rc_idx);
2547}
2548
2549static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
2550{
2551 if (dev->pdev)
2552 device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
2553}
2554#else
/* No-op stub used when CONFIG_SYSFS is not set. */
static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
{
}
2558
/* No-op stub used when CONFIG_SYSFS is not set. */
static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
{
}
2562#endif
2563
Tony Truong349ee492014-10-01 17:35:56 -07002564#ifdef CONFIG_DEBUG_FS
2565static struct dentry *dent_msm_pcie;
2566static struct dentry *dfile_rc_sel;
2567static struct dentry *dfile_case;
2568static struct dentry *dfile_base_sel;
2569static struct dentry *dfile_linkdown_panic;
2570static struct dentry *dfile_wr_offset;
2571static struct dentry *dfile_wr_mask;
2572static struct dentry *dfile_wr_value;
Tony Truong9f2c7722017-02-28 15:02:27 -08002573static struct dentry *dfile_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002574static struct dentry *dfile_aer_enable;
2575static struct dentry *dfile_corr_counter_limit;
2576
2577static u32 rc_sel_max;
2578
/*
 * debugfs "case" write handler: parse a decimal testcase number from
 * userspace and run it on every root complex selected by the rc_sel
 * bitmask (defaulting to RC0 when rc_sel is unset).
 */
static ssize_t msm_pcie_cmd_debug(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long ret;
	char str[MAX_MSG_LEN];
	unsigned int testcase = 0;
	int i;
	/* bound the copy by the smaller of our buffer and the write size */
	u32 size = sizeof(str) < count ? sizeof(str) : count;

	memset(str, 0, size);
	ret = copy_from_user(str, buf, size);
	if (ret)
		return -EFAULT;

	/* accumulate a leading run of decimal digits */
	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
		testcase = (testcase * 10) + (str[i] - '0');

	if (!rc_sel)
		rc_sel = 1;

	pr_alert("PCIe: TEST: %d\n", testcase);

	for (i = 0; i < MAX_RC_NUM; i++) {
		if (!((rc_sel >> i) & 0x1))
			continue;
		msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
	}

	return count;
}

/* fops for the write-only debugfs "case" node */
const struct file_operations msm_pcie_cmd_debug_ops = {
	.write = msm_pcie_cmd_debug,
};
2614
2615static ssize_t msm_pcie_set_rc_sel(struct file *file,
2616 const char __user *buf,
2617 size_t count, loff_t *ppos)
2618{
2619 unsigned long ret;
2620 char str[MAX_MSG_LEN];
2621 int i;
2622 u32 new_rc_sel = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002623 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002624
Tony Truongfdbd5672017-01-06 16:23:14 -08002625 memset(str, 0, size);
2626 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002627 if (ret)
2628 return -EFAULT;
2629
Tony Truongfdbd5672017-01-06 16:23:14 -08002630 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002631 new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
2632
2633 if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
2634 pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
2635 pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
2636 } else {
2637 rc_sel = new_rc_sel;
2638 pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
2639 }
2640
2641 pr_alert("PCIe: the following RC(s) will be tested:\n");
2642 for (i = 0; i < MAX_RC_NUM; i++) {
2643 if (!rc_sel) {
2644 pr_alert("RC %d\n", i);
2645 break;
2646 } else if (rc_sel & (1 << i)) {
2647 pr_alert("RC %d\n", i);
2648 }
2649 }
2650
2651 return count;
2652}
2653
2654const struct file_operations msm_pcie_rc_sel_ops = {
2655 .write = msm_pcie_set_rc_sel,
2656};
2657
2658static ssize_t msm_pcie_set_base_sel(struct file *file,
2659 const char __user *buf,
2660 size_t count, loff_t *ppos)
2661{
2662 unsigned long ret;
2663 char str[MAX_MSG_LEN];
2664 int i;
2665 u32 new_base_sel = 0;
2666 char *base_sel_name;
Tony Truongfdbd5672017-01-06 16:23:14 -08002667 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002668
Tony Truongfdbd5672017-01-06 16:23:14 -08002669 memset(str, 0, size);
2670 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002671 if (ret)
2672 return -EFAULT;
2673
Tony Truongfdbd5672017-01-06 16:23:14 -08002674 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002675 new_base_sel = (new_base_sel * 10) + (str[i] - '0');
2676
2677 if (!new_base_sel || new_base_sel > 5) {
2678 pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
2679 new_base_sel);
2680 pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
2681 } else {
2682 base_sel = new_base_sel;
2683 pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
2684 }
2685
2686 switch (base_sel) {
2687 case 1:
2688 base_sel_name = "PARF";
2689 break;
2690 case 2:
2691 base_sel_name = "PHY";
2692 break;
2693 case 3:
2694 base_sel_name = "RC CONFIG SPACE";
2695 break;
2696 case 4:
2697 base_sel_name = "ELBI";
2698 break;
2699 case 5:
2700 base_sel_name = "EP CONFIG SPACE";
2701 break;
2702 default:
2703 base_sel_name = "INVALID";
2704 break;
2705 }
2706
2707 pr_alert("%s\n", base_sel_name);
2708
2709 return count;
2710}
2711
2712const struct file_operations msm_pcie_base_sel_ops = {
2713 .write = msm_pcie_set_base_sel,
2714};
2715
2716static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
2717 const char __user *buf,
2718 size_t count, loff_t *ppos)
2719{
2720 unsigned long ret;
2721 char str[MAX_MSG_LEN];
2722 u32 new_linkdown_panic = 0;
2723 int i;
2724
2725 memset(str, 0, sizeof(str));
2726 ret = copy_from_user(str, buf, sizeof(str));
2727 if (ret)
2728 return -EFAULT;
2729
2730 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2731 new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
2732
2733 if (new_linkdown_panic <= 1) {
2734 for (i = 0; i < MAX_RC_NUM; i++) {
2735 if (!rc_sel) {
2736 msm_pcie_dev[0].linkdown_panic =
2737 new_linkdown_panic;
2738 PCIE_DBG_FS(&msm_pcie_dev[0],
2739 "PCIe: RC0: linkdown_panic is now %d\n",
2740 msm_pcie_dev[0].linkdown_panic);
2741 break;
2742 } else if (rc_sel & (1 << i)) {
2743 msm_pcie_dev[i].linkdown_panic =
2744 new_linkdown_panic;
2745 PCIE_DBG_FS(&msm_pcie_dev[i],
2746 "PCIe: RC%d: linkdown_panic is now %d\n",
2747 i, msm_pcie_dev[i].linkdown_panic);
2748 }
2749 }
2750 } else {
2751 pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
2752 new_linkdown_panic);
2753 }
2754
2755 return count;
2756}
2757
2758const struct file_operations msm_pcie_linkdown_panic_ops = {
2759 .write = msm_pcie_set_linkdown_panic,
2760};
2761
2762static ssize_t msm_pcie_set_wr_offset(struct file *file,
2763 const char __user *buf,
2764 size_t count, loff_t *ppos)
2765{
2766 unsigned long ret;
2767 char str[MAX_MSG_LEN];
2768 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002769 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002770
Tony Truongfdbd5672017-01-06 16:23:14 -08002771 memset(str, 0, size);
2772 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002773 if (ret)
2774 return -EFAULT;
2775
2776 wr_offset = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002777 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002778 wr_offset = (wr_offset * 10) + (str[i] - '0');
2779
2780 pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
2781
2782 return count;
2783}
2784
2785const struct file_operations msm_pcie_wr_offset_ops = {
2786 .write = msm_pcie_set_wr_offset,
2787};
2788
/*
 * debugfs "wr_mask" write handler: parse a decimal bit mask for the
 * register-write testcase into the file-scope wr_mask control.
 */
static ssize_t msm_pcie_set_wr_mask(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long ret;
	char str[MAX_MSG_LEN];
	int i;
	/* bound the copy by the smaller of our buffer and the write size */
	u32 size = sizeof(str) < count ? sizeof(str) : count;

	memset(str, 0, size);
	ret = copy_from_user(str, buf, size);
	if (ret)
		return -EFAULT;

	/* accumulate the leading run of decimal digits */
	wr_mask = 0;
	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
		wr_mask = (wr_mask * 10) + (str[i] - '0');

	pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);

	return count;
}

const struct file_operations msm_pcie_wr_mask_ops = {
	.write = msm_pcie_set_wr_mask,
};
/*
 * debugfs "wr_value" write handler: parse a decimal value for the
 * register-write testcase into the file-scope wr_value control.
 */
static ssize_t msm_pcie_set_wr_value(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long ret;
	char str[MAX_MSG_LEN];
	int i;
	/* bound the copy by the smaller of our buffer and the write size */
	u32 size = sizeof(str) < count ? sizeof(str) : count;

	memset(str, 0, size);
	ret = copy_from_user(str, buf, size);
	if (ret)
		return -EFAULT;

	/* accumulate the leading run of decimal digits */
	wr_value = 0;
	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
		wr_value = (wr_value * 10) + (str[i] - '0');

	pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);

	return count;
}

const struct file_operations msm_pcie_wr_value_ops = {
	.write = msm_pcie_set_wr_value,
};
2841
Tony Truong9f2c7722017-02-28 15:02:27 -08002842static ssize_t msm_pcie_set_boot_option(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002843 const char __user *buf,
2844 size_t count, loff_t *ppos)
2845{
2846 unsigned long ret;
2847 char str[MAX_MSG_LEN];
Tony Truong9f2c7722017-02-28 15:02:27 -08002848 u32 new_boot_option = 0;
Tony Truong349ee492014-10-01 17:35:56 -07002849 int i;
2850
2851 memset(str, 0, sizeof(str));
2852 ret = copy_from_user(str, buf, sizeof(str));
2853 if (ret)
2854 return -EFAULT;
2855
2856 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong9f2c7722017-02-28 15:02:27 -08002857 new_boot_option = (new_boot_option * 10) + (str[i] - '0');
Tony Truong349ee492014-10-01 17:35:56 -07002858
Tony Truong9f2c7722017-02-28 15:02:27 -08002859 if (new_boot_option <= 1) {
Tony Truong349ee492014-10-01 17:35:56 -07002860 for (i = 0; i < MAX_RC_NUM; i++) {
2861 if (!rc_sel) {
Tony Truong9f2c7722017-02-28 15:02:27 -08002862 msm_pcie_dev[0].boot_option = new_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002863 PCIE_DBG_FS(&msm_pcie_dev[0],
Tony Truong9f2c7722017-02-28 15:02:27 -08002864 "PCIe: RC0: boot_option is now 0x%x\n",
2865 msm_pcie_dev[0].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002866 break;
2867 } else if (rc_sel & (1 << i)) {
Tony Truong9f2c7722017-02-28 15:02:27 -08002868 msm_pcie_dev[i].boot_option = new_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002869 PCIE_DBG_FS(&msm_pcie_dev[i],
Tony Truong9f2c7722017-02-28 15:02:27 -08002870 "PCIe: RC%d: boot_option is now 0x%x\n",
2871 i, msm_pcie_dev[i].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002872 }
2873 }
2874 } else {
Tony Truong9f2c7722017-02-28 15:02:27 -08002875 pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
2876 new_boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002877 }
2878
2879 return count;
2880}
2881
Tony Truong9f2c7722017-02-28 15:02:27 -08002882const struct file_operations msm_pcie_boot_option_ops = {
2883 .write = msm_pcie_set_boot_option,
Tony Truong349ee492014-10-01 17:35:56 -07002884};
2885
2886static ssize_t msm_pcie_set_aer_enable(struct file *file,
2887 const char __user *buf,
2888 size_t count, loff_t *ppos)
2889{
2890 unsigned long ret;
2891 char str[MAX_MSG_LEN];
2892 u32 new_aer_enable = 0;
2893 u32 temp_rc_sel;
2894 int i;
2895
2896 memset(str, 0, sizeof(str));
2897 ret = copy_from_user(str, buf, sizeof(str));
2898 if (ret)
2899 return -EFAULT;
2900
2901 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2902 new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
2903
2904 if (new_aer_enable > 1) {
2905 pr_err(
2906 "PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
2907 new_aer_enable);
2908 return count;
2909 }
2910
2911 if (rc_sel)
2912 temp_rc_sel = rc_sel;
2913 else
2914 temp_rc_sel = 0x1;
2915
2916 for (i = 0; i < MAX_RC_NUM; i++) {
2917 if (temp_rc_sel & (1 << i)) {
2918 msm_pcie_dev[i].aer_enable = new_aer_enable;
2919 PCIE_DBG_FS(&msm_pcie_dev[i],
2920 "PCIe: RC%d: aer_enable is now %d\n",
2921 i, msm_pcie_dev[i].aer_enable);
2922
2923 msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
2924 PCIE20_BRIDGE_CTRL,
2925 new_aer_enable ? 0 : BIT(16),
2926 new_aer_enable ? BIT(16) : 0);
2927
2928 PCIE_DBG_FS(&msm_pcie_dev[i],
2929 "RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
2930 readl_relaxed(msm_pcie_dev[i].dm_core +
2931 PCIE20_BRIDGE_CTRL));
2932 }
2933 }
2934
2935 return count;
2936}
2937
/* debugfs "aer_enable" file: write-only, handled above. */
const struct file_operations msm_pcie_aer_enable_ops = {
	.write = msm_pcie_set_aer_enable,
};
2941
2942static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
2943 const char __user *buf,
2944 size_t count, loff_t *ppos)
2945{
2946 unsigned long ret;
2947 char str[MAX_MSG_LEN];
2948 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002949 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002950
Tony Truongfdbd5672017-01-06 16:23:14 -08002951 memset(str, 0, size);
2952 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002953 if (ret)
2954 return -EFAULT;
2955
2956 corr_counter_limit = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002957 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002958 corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
2959
2960 pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
2961
2962 return count;
2963}
2964
/* debugfs "corr_counter_limit" file: write-only, handled above. */
const struct file_operations msm_pcie_corr_counter_limit_ops = {
	.write = msm_pcie_set_corr_counter_limit,
};
2968
2969static void msm_pcie_debugfs_init(void)
2970{
2971 rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
2972 wr_mask = 0xffffffff;
2973
2974 dent_msm_pcie = debugfs_create_dir("pci-msm", 0);
2975 if (IS_ERR(dent_msm_pcie)) {
2976 pr_err("PCIe: fail to create the folder for debug_fs.\n");
2977 return;
2978 }
2979
2980 dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
2981 dent_msm_pcie, 0,
2982 &msm_pcie_rc_sel_ops);
2983 if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
2984 pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
2985 goto rc_sel_error;
2986 }
2987
2988 dfile_case = debugfs_create_file("case", 0664,
2989 dent_msm_pcie, 0,
2990 &msm_pcie_cmd_debug_ops);
2991 if (!dfile_case || IS_ERR(dfile_case)) {
2992 pr_err("PCIe: fail to create the file for debug_fs case.\n");
2993 goto case_error;
2994 }
2995
2996 dfile_base_sel = debugfs_create_file("base_sel", 0664,
2997 dent_msm_pcie, 0,
2998 &msm_pcie_base_sel_ops);
2999 if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
3000 pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
3001 goto base_sel_error;
3002 }
3003
3004 dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
3005 dent_msm_pcie, 0,
3006 &msm_pcie_linkdown_panic_ops);
3007 if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
3008 pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
3009 goto linkdown_panic_error;
3010 }
3011
3012 dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
3013 dent_msm_pcie, 0,
3014 &msm_pcie_wr_offset_ops);
3015 if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
3016 pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
3017 goto wr_offset_error;
3018 }
3019
3020 dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
3021 dent_msm_pcie, 0,
3022 &msm_pcie_wr_mask_ops);
3023 if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
3024 pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
3025 goto wr_mask_error;
3026 }
3027
3028 dfile_wr_value = debugfs_create_file("wr_value", 0664,
3029 dent_msm_pcie, 0,
3030 &msm_pcie_wr_value_ops);
3031 if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
3032 pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
3033 goto wr_value_error;
3034 }
3035
Tony Truong9f2c7722017-02-28 15:02:27 -08003036 dfile_boot_option = debugfs_create_file("boot_option", 0664,
Tony Truong349ee492014-10-01 17:35:56 -07003037 dent_msm_pcie, 0,
Tony Truong9f2c7722017-02-28 15:02:27 -08003038 &msm_pcie_boot_option_ops);
3039 if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
3040 pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
3041 goto boot_option_error;
Tony Truong349ee492014-10-01 17:35:56 -07003042 }
3043
3044 dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
3045 dent_msm_pcie, 0,
3046 &msm_pcie_aer_enable_ops);
3047 if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
3048 pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
3049 goto aer_enable_error;
3050 }
3051
3052 dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
3053 0664, dent_msm_pcie, 0,
3054 &msm_pcie_corr_counter_limit_ops);
3055 if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
3056 pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
3057 goto corr_counter_limit_error;
3058 }
3059 return;
3060
3061corr_counter_limit_error:
3062 debugfs_remove(dfile_aer_enable);
3063aer_enable_error:
Tony Truong9f2c7722017-02-28 15:02:27 -08003064 debugfs_remove(dfile_boot_option);
3065boot_option_error:
Tony Truong349ee492014-10-01 17:35:56 -07003066 debugfs_remove(dfile_wr_value);
3067wr_value_error:
3068 debugfs_remove(dfile_wr_mask);
3069wr_mask_error:
3070 debugfs_remove(dfile_wr_offset);
3071wr_offset_error:
3072 debugfs_remove(dfile_linkdown_panic);
3073linkdown_panic_error:
3074 debugfs_remove(dfile_base_sel);
3075base_sel_error:
3076 debugfs_remove(dfile_case);
3077case_error:
3078 debugfs_remove(dfile_rc_sel);
3079rc_sel_error:
3080 debugfs_remove(dent_msm_pcie);
3081}
3082
3083static void msm_pcie_debugfs_exit(void)
3084{
3085 debugfs_remove(dfile_rc_sel);
3086 debugfs_remove(dfile_case);
3087 debugfs_remove(dfile_base_sel);
3088 debugfs_remove(dfile_linkdown_panic);
3089 debugfs_remove(dfile_wr_offset);
3090 debugfs_remove(dfile_wr_mask);
3091 debugfs_remove(dfile_wr_value);
Tony Truong9f2c7722017-02-28 15:02:27 -08003092 debugfs_remove(dfile_boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07003093 debugfs_remove(dfile_aer_enable);
3094 debugfs_remove(dfile_corr_counter_limit);
3095}
3096#else
/* No-op stubs used when debugfs support is compiled out. */
static void msm_pcie_debugfs_init(void)
{
}

static void msm_pcie_debugfs_exit(void)
{
}
3104#endif
3105
3106static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
3107{
3108 return readl_relaxed(dev->dm_core +
3109 PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
3110}
3111
/**
 * msm_pcie_iatu_config - configure outbound address translation region
 * @dev: root complex
 * @nr: region number
 * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
 * @host_addr: - region start address on host
 * @host_end: - region end address (low 32 bit) on host,
 *	upper 32 bits are same as for @host_addr
 * @target_addr: - region start address on target
 *
 * Programs one outbound iATU region through the viewport registers:
 * select region, disable it, write the new bounds/target, then
 * re-enable. Each step is fenced with wmb() so the device sees the
 * writes in this exact order.
 */
static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
				unsigned long host_addr, u32 host_end,
				unsigned long target_addr)
{
	void __iomem *pcie20 = dev->dm_core;

	/*
	 * Mirror every value into rc_shadow first — the shadow is used
	 * elsewhere to re-program the RC (NOTE(review): confirm exact
	 * restore path).
	 */
	if (dev->shadow_en) {
		dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
			nr;
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
			type;
		dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
			lower_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
			upper_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
			host_end;
		dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
			lower_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
			upper_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
			BIT(31);
	}

	/* select region */
	writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
	/* ensure that hardware locks it */
	wmb();

	/* switch off region before changing it */
	writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
	/* and wait till it propagates to the hardware */
	wmb();

	writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
	writel_relaxed(lower_32_bits(host_addr),
		pcie20 + PCIE20_PLR_IATU_LBAR);
	writel_relaxed(upper_32_bits(host_addr),
		pcie20 + PCIE20_PLR_IATU_UBAR);
	writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
	writel_relaxed(lower_32_bits(target_addr),
		pcie20 + PCIE20_PLR_IATU_LTAR);
	writel_relaxed(upper_32_bits(target_addr),
		pcie20 + PCIE20_PLR_IATU_UTAR);
	/* ensure that changes propagated to the hardware */
	wmb();
	/* BIT(31) of CTRL2 re-enables the region */
	writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);

	/* ensure that changes propagated to the hardware */
	wmb();

	/* Dump the programmed region when we have an enumerated EP table. */
	if (dev->enumerated) {
		PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
			dev->pcidev_table[nr].bdf >> 24,
			dev->pcidev_table[nr].bdf >> 19 & 0x1f,
			dev->pcidev_table[nr].bdf >> 16 & 0x07);
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
	}
}
3197
3198/**
3199 * msm_pcie_cfg_bdf - configure for config access
3200 * @dev: root commpex
3201 * @bus: PCI bus number
3202 * @devfn: PCI dev and function number
3203 *
3204 * Remap if required region 0 for config access of proper type
3205 * (CFG0 for bus 1, CFG1 for other buses)
3206 * Cache current device bdf for speed-up
3207 */
3208static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
3209{
3210 struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
3211 u32 bdf = BDF_OFFSET(bus, devfn);
3212 u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3213
3214 if (dev->current_bdf == bdf)
3215 return;
3216
3217 msm_pcie_iatu_config(dev, 0, type,
3218 axi_conf->start,
3219 axi_conf->start + SZ_4K - 1,
3220 bdf);
3221
3222 dev->current_bdf = bdf;
3223}
3224
/*
 * msm_pcie_save_shadow() - record a config-space write in the shadow.
 * @dev: root complex the write went through.
 * @word_offset: word-aligned config-space offset written.
 * @wr_val: value written.
 * @bdf: target encoded by BDF_OFFSET().
 * @rc: true when the write targeted the root complex itself.
 *
 * RC writes land in rc_shadow; endpoint writes land in the matching
 * ep_shadow slot. A bdf not yet in this RC's pcidev_table is first
 * registered there (and in the global msm_pcie_dev_tbl).
 */
static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
					u32 word_offset, u32 wr_val,
					u32 bdf, bool rc)
{
	int i, j;
	u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;

	if (rc) {
		dev->rc_shadow[word_offset / 4] = wr_val;
	} else {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			/* First empty slot: claim it for this new bdf. */
			if (!dev->pcidev_table[i].bdf) {
				/* Mirror the claim into the global table. */
				for (j = 0; j < max_dev; j++)
					if (!msm_pcie_dev_tbl[j].bdf) {
						msm_pcie_dev_tbl[j].bdf = bdf;
						break;
					}
				dev->pcidev_table[i].bdf = bdf;
				/* A device beyond slot 0 marks a bridge. */
				if ((!dev->bridge_found) && (i > 0))
					dev->bridge_found = true;
			}
			if (dev->pcidev_table[i].bdf == bdf) {
				dev->ep_shadow[i][word_offset / 4] = wr_val;
				break;
			}
		}
	}
}
3253
/*
 * msm_pcie_oper_conf() - perform one config-space read or write.
 * @bus: PCI bus the access targets.
 * @devfn: device/function number on @bus.
 * @oper: RD or WR.
 * @where: byte offset into config space.
 * @size: access width in bytes.
 * @val: result for reads; value to write for writes.
 *
 * All accesses are serialized under cfg_lock. Any rejected access
 * returns PCIBIOS_DEVICE_NOT_FOUND with *val forced to all-ones.
 */
static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
				int where, int size, u32 *val)
{
	uint32_t word_offset, byte_offset, mask;
	uint32_t rd_val, wr_val;
	struct msm_pcie_dev_t *dev;
	void __iomem *config_base;
	bool rc = false;
	u32 rc_idx;
	int rv = 0;
	u32 bdf = BDF_OFFSET(bus->number, devfn);
	int i;

	dev = PCIE_BUS_PRIV_DATA(bus);

	if (!dev) {
		pr_err("PCIe: No device found for this bus.\n");
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto out;
	}

	rc_idx = dev->rc_idx;
	/* Bus 0 is the root complex itself. */
	rc = (bus->number == 0);

	spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);

	/* Config access may be temporarily disabled (cfg_access false). */
	if (!dev->cfg_access) {
		PCIE_DBG3(dev,
			"Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* The RC only exposes devfn 0 on bus 0. */
	if (rc && (devfn != 0)) {
		PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
			(oper == RD) ? "rd" : "wr", bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG3(dev,
			"Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* check if the link is up for endpoint */
	if (!rc && !msm_pcie_is_link_up(dev)) {
		PCIE_ERR(dev,
			"PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
			rc_idx, (oper == RD) ? "rd" : "wr",
			bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* Before enumeration, retarget iATU region 0 at this bdf. */
	if (!rc && !dev->enumerated)
		msm_pcie_cfg_bdf(dev, bus->number, devfn);

	word_offset = where & ~0x3;
	byte_offset = where & 0x3;
	/* Mask selecting 'size' bytes at 'byte_offset' within the word. */
	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);

	/*
	 * RC accesses go through the DBI space; pre-enumeration EP
	 * accesses use the shared conf window; enumerated EPs have a
	 * per-device conf_base in pcidev_table.
	 */
	if (rc || !dev->enumerated) {
		config_base = rc ? dev->dm_core : dev->conf;
	} else {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (dev->pcidev_table[i].bdf == bdf) {
				config_base = dev->pcidev_table[i].conf_base;
				break;
			}
		}
		if (i == MAX_DEVICE_NUM) {
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
		}
	}

	rd_val = readl_relaxed(config_base + word_offset);

	if (oper == RD) {
		*val = ((rd_val & mask) >> (8 * byte_offset));
		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
			rc_idx, bus->number, devfn, where, size, *val, rd_val);
	} else {
		/* Read-modify-write of the enclosing 32-bit word. */
		wr_val = (rd_val & ~mask) |
			((*val << (8 * byte_offset)) & mask);

		/*
		 * NOTE(review): RC writes to word 0x3c also force bits
		 * 17:16 (the bridge-control byte) — confirm intent.
		 */
		if ((bus->number == 0) && (where == 0x3c))
			wr_val = wr_val | (3 << 16);

		writel_relaxed(wr_val, config_base + word_offset);
		wmb(); /* ensure config data is written to hardware register */

		if (dev->shadow_en) {
			/*
			 * All-ones at both offsets means the link dropped;
			 * don't record the bogus value in the shadow.
			 */
			if (rd_val == PCIE_LINK_DOWN &&
				(readl_relaxed(config_base) == PCIE_LINK_DOWN))
				PCIE_ERR(dev,
					"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
					rc_idx, bus->number, devfn,
					where, size);
			else
				msm_pcie_save_shadow(dev, word_offset, wr_val,
					bdf, rc);
		}

		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
			rc_idx, bus->number, devfn, where, size,
			wr_val, rd_val, *val);
	}

unlock:
	spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
out:
	return rv;
}
3381
3382static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
3383 int size, u32 *val)
3384{
3385 int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
3386
3387 if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
3388 *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
3389 PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
3390 }
3391
3392 return ret;
3393}
3394
3395static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
3396 int where, int size, u32 val)
3397{
3398 return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
3399}
3400
/* Config-space accessors handed to the PCI core for this controller. */
static struct pci_ops msm_pcie_ops = {
	.read = msm_pcie_rd_conf,
	.write = msm_pcie_wr_conf,
};
3405
3406static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
3407{
3408 int rc = 0, i;
3409 struct msm_pcie_gpio_info_t *info;
3410
3411 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3412
3413 for (i = 0; i < dev->gpio_n; i++) {
3414 info = &dev->gpio[i];
3415
3416 if (!info->num)
3417 continue;
3418
3419 rc = gpio_request(info->num, info->name);
3420 if (rc) {
3421 PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
3422 dev->rc_idx, info->name, rc);
3423 break;
3424 }
3425
3426 if (info->out)
3427 rc = gpio_direction_output(info->num, info->init);
3428 else
3429 rc = gpio_direction_input(info->num);
3430 if (rc) {
3431 PCIE_ERR(dev,
3432 "PCIe: RC%d can't set direction for GPIO %s:%d\n",
3433 dev->rc_idx, info->name, rc);
3434 gpio_free(info->num);
3435 break;
3436 }
3437 }
3438
3439 if (rc)
3440 while (i--)
3441 gpio_free(dev->gpio[i].num);
3442
3443 return rc;
3444}
3445
3446static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
3447{
3448 int i;
3449
3450 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3451
3452 for (i = 0; i < dev->gpio_n; i++)
3453 gpio_free(dev->gpio[i].num);
3454}
3455
/*
 * msm_pcie_vreg_init() - enable every regulator of a root complex.
 * @dev: root complex to power up.
 *
 * For each populated vreg entry: set the voltage range (when max_v is
 * non-zero), vote the load/mode (when opt_mode is non-zero), then
 * enable. On failure, everything enabled so far is disabled again in
 * reverse order and the vreg-cx corner vote is dropped.
 *
 * Return: 0 on success, or the failing regulator API error code.
 */
int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct regulator *vreg;
	struct msm_pcie_vreg_info_t *info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		info = &dev->vreg[i];
		vreg = info->hdl;

		/* Entries without a handle are optional supplies. */
		if (!vreg)
			continue;

		PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
			dev->rc_idx, info->name);
		if (info->max_v) {
			rc = regulator_set_voltage(vreg,
				info->min_v, info->max_v);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set voltage for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		if (info->opt_mode) {
			rc = regulator_set_load(vreg, info->opt_mode);
			if (rc < 0) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set mode for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		rc = regulator_enable(vreg);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe: RC%d can't enable regulator %s: %d\n",
				dev->rc_idx, info->name, rc);
			break;
		}
	}

	/* Failure: disable what was already enabled, in reverse order. */
	if (rc)
		while (i--) {
			struct regulator *hdl = dev->vreg[i].hdl;

			if (hdl) {
				regulator_disable(hdl);
				if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
					PCIE_DBG(dev,
						"RC%d: Removing %s vote.\n",
						dev->rc_idx,
						dev->vreg[i].name);
					/* Drop the CX RPM corner vote. */
					regulator_set_voltage(hdl,
						RPM_REGULATOR_CORNER_NONE,
						INT_MAX);
				}
			}

		}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
3526
3527static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
3528{
3529 int i;
3530
3531 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3532
3533 for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
3534 if (dev->vreg[i].hdl) {
3535 PCIE_DBG(dev, "Vreg %s is being disabled\n",
3536 dev->vreg[i].name);
3537 regulator_disable(dev->vreg[i].hdl);
3538
3539 if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
3540 PCIE_DBG(dev,
3541 "RC%d: Removing %s vote.\n",
3542 dev->rc_idx,
3543 dev->vreg[i].name);
3544 regulator_set_voltage(dev->vreg[i].hdl,
3545 RPM_REGULATOR_CORNER_NONE,
3546 INT_MAX);
3547 }
3548 }
3549 }
3550
3551 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3552}
3553
/*
 * msm_pcie_clk_init() - power up the GDSC(s), vote bus bandwidth,
 * enable all core clocks, then deassert the controller resets.
 *
 * Return: 0 on success or a negative error code — but see the notes
 * below on how enable failures can be masked.
 *
 * NOTE(review): a clk_prepare_enable() failure does not break out of
 * the clock loop, so a later successful iteration overwrites rc; and
 * even when the error path below runs (clocks/GDSCs disabled), control
 * still falls through to the reset-deassert loop, where a successful
 * reset_control_deassert() resets rc to 0 and the function reports
 * success. Confirm whether this is intended before relying on rc.
 */
static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct msm_pcie_clk_info_t *info;
	struct msm_pcie_reset_info_t *reset_info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	rc = regulator_enable(dev->gdsc);

	if (rc) {
		PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
			dev->rc_idx, dev->pdev->name);
		return rc;
	}

	/* SMMU power domain is optional. */
	if (dev->gdsc_smmu) {
		rc = regulator_enable(dev->gdsc_smmu);

		if (rc) {
			PCIE_ERR(dev,
				"PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
				dev->rc_idx, dev->pdev->name);
			return rc;
		}
	}

	PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
	if (dev->bus_client) {
		rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe: fail to set bus bandwidth for RC%d:%d.\n",
				dev->rc_idx, rc);
			return rc;
		}

		PCIE_DBG2(dev,
			"PCIe: set bus bandwidth for RC%d.\n",
			dev->rc_idx);
	}

	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
		info = &dev->clk[i];

		if (!info->hdl)
			continue;

		if (info->config_mem)
			msm_pcie_config_clock_mem(dev, info);

		/* A zero freq means "leave the rate alone". */
		if (info->freq) {
			rc = clk_set_rate(info->hdl, info->freq);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set rate for clk %s: %d.\n",
					dev->rc_idx, info->name, rc);
				break;
			}

			PCIE_DBG2(dev,
				"PCIe: RC%d set rate for clk %s.\n",
				dev->rc_idx, info->name);
		}

		rc = clk_prepare_enable(info->hdl);

		if (rc)
			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
				dev->rc_idx, info->name);
		else
			PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
				info->name, dev->rc_idx);
	}

	/* Error: undo the clocks enabled so far and drop both GDSCs. */
	if (rc) {
		PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
			dev->rc_idx);
		while (i--) {
			struct clk *hdl = dev->clk[i].hdl;

			if (hdl)
				clk_disable_unprepare(hdl);
		}

		if (dev->gdsc_smmu)
			regulator_disable(dev->gdsc_smmu);

		regulator_disable(dev->gdsc);
	}

	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
		reset_info = &dev->reset[i];
		if (reset_info->hdl) {
			rc = reset_control_deassert(reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to deassert reset for %s.\n",
					dev->rc_idx, reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully deasserted reset for %s.\n",
					dev->rc_idx, reset_info->name);
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
3664
3665static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
3666{
3667 int i;
3668 int rc;
3669
3670 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3671
3672 for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
3673 if (dev->clk[i].hdl)
3674 clk_disable_unprepare(dev->clk[i].hdl);
3675
3676 if (dev->bus_client) {
3677 PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
3678 dev->rc_idx);
3679
3680 rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
3681 if (rc)
3682 PCIE_ERR(dev,
3683 "PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
3684 dev->rc_idx, rc);
3685 else
3686 PCIE_DBG(dev,
3687 "PCIe: relinquish bus bandwidth for RC%d.\n",
3688 dev->rc_idx);
3689 }
3690
3691 if (dev->gdsc_smmu)
3692 regulator_disable(dev->gdsc_smmu);
3693
3694 regulator_disable(dev->gdsc);
3695
3696 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3697}
3698
/*
 * msm_pcie_pipe_clk_init() - set rates for and enable the PHY pipe
 * clocks, then deassert the pipe resets.
 *
 * NOTE(review): as in msm_pcie_clk_init(), a clk_prepare_enable()
 * failure does not break out of the loop, and rc is later overwritten
 * by the reset-deassert results, so an enable failure can end up being
 * reported as success — confirm whether this is intended.
 */
static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct msm_pcie_clk_info_t *info;
	struct msm_pcie_reset_info_t *pipe_reset_info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		info = &dev->pipeclk[i];

		if (!info->hdl)
			continue;


		if (info->config_mem)
			msm_pcie_config_clock_mem(dev, info);

		/* A zero freq means "leave the rate alone". */
		if (info->freq) {
			rc = clk_set_rate(info->hdl, info->freq);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set rate for clk %s: %d.\n",
					dev->rc_idx, info->name, rc);
				break;
			}

			PCIE_DBG2(dev,
				"PCIe: RC%d set rate for clk %s: %d.\n",
				dev->rc_idx, info->name, rc);
		}

		rc = clk_prepare_enable(info->hdl);

		if (rc)
			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
				dev->rc_idx, info->name);
		else
			PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
				dev->rc_idx, info->name);
	}

	/* Error: undo the pipe clocks enabled so far. */
	if (rc) {
		PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
			dev->rc_idx);
		while (i--)
			if (dev->pipeclk[i].hdl)
				clk_disable_unprepare(dev->pipeclk[i].hdl);
	}

	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];
		if (pipe_reset_info->hdl) {
			rc = reset_control_deassert(
				pipe_reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to deassert pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully deasserted pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
3769
3770static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
3771{
3772 int i;
3773
3774 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3775
3776 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
3777 if (dev->pipeclk[i].hdl)
3778 clk_disable_unprepare(
3779 dev->pipeclk[i].hdl);
3780
3781 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3782}
3783
3784static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
3785{
3786 int i;
3787 u8 type;
3788 struct msm_pcie_device_info *dev_table = dev->pcidev_table;
3789
3790 for (i = 0; i < MAX_DEVICE_NUM; i++) {
3791 if (!dev_table[i].bdf)
3792 break;
3793
3794 type = dev_table[i].bdf >> 24 == 0x1 ?
3795 PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3796
3797 msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
3798 dev_table[i].phy_address + SZ_4K - 1,
3799 dev_table[i].bdf);
3800 }
3801}
3802
/*
 * msm_pcie_config_controller() - program controller-level registers
 * after link training: iATU region(s), N_FTS, AUX clock frequency,
 * completion timeout, AER enable, and SMMU/BDF translation.
 */
static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
{
	int i;

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
	 * axi config address range to device config address range
	 */
	if (dev->enumerated) {
		msm_pcie_iatu_config_all_ep(dev);
	} else {
		dev->current_bdf = 0; /* to force IATU re-config */
		msm_pcie_cfg_bdf(dev, 1, 0);
	}

	/* configure N_FTS */
	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
	/* n_fts == 0: just set BIT(15); otherwise write the N_FTS field. */
	if (!dev->n_fts)
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
			0, BIT(15));
	else
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
			PCIE20_ACK_N_FTS,
			dev->n_fts << 8);

	if (dev->shadow_en)
		dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
			readl_relaxed(dev->dm_core +
				PCIE20_ACK_F_ASPM_CTRL_REG);

	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));

	/* configure AUX clock frequency register for PCIe core */
	if (dev->use_19p2mhz_aux_clk)
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
	else
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);

	/* configure the completion timeout value for PCIe core */
	if (dev->cpl_timeout && dev->bridge_found)
		msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_DEVICE_CONTROL2_STATUS2,
			0xf, dev->cpl_timeout);

	/* Enable AER on RC */
	if (dev->aer_enable) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
			BIT(16)|BIT(17));
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
			BIT(3)|BIT(2)|BIT(1)|BIT(0));

		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
	}

	/* configure SMMU registers */
	if (dev->smmu_exist) {
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_SID_OFFSET, 0);

		/* Re-program the per-device BDF translation entries. */
		if (dev->enumerated) {
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				if (dev->pcidev_table[i].dev &&
					dev->pcidev_table[i].short_bdf) {
					msm_pcie_write_reg(dev->parf,
						PCIE20_PARF_BDF_TRANSLATE_N +
						dev->pcidev_table[i].short_bdf
						* 4,
						dev->pcidev_table[i].bdf >> 16);
				}
			}
		}
	}
}
3884
3885static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
3886{
3887 u32 val;
3888 u32 current_offset;
3889 u32 ep_l1sub_ctrl1_offset = 0;
3890 u32 ep_l1sub_cap_reg1_offset = 0;
3891 u32 ep_link_cap_offset = 0;
3892 u32 ep_link_ctrlstts_offset = 0;
3893 u32 ep_dev_ctrl2stts2_offset = 0;
3894
3895 /* Enable the AUX Clock and the Core Clk to be synchronous for L1SS*/
3896 if (!dev->aux_clk_sync && dev->l1ss_supported)
3897 msm_pcie_write_mask(dev->parf +
3898 PCIE20_PARF_SYS_CTRL, BIT(3), 0);
3899
3900 current_offset = readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;
3901
3902 while (current_offset) {
3903 if (msm_pcie_check_align(dev, current_offset))
3904 return;
3905
3906 val = readl_relaxed(dev->conf + current_offset);
3907 if ((val & 0xff) == PCIE20_CAP_ID) {
3908 ep_link_cap_offset = current_offset + 0x0c;
3909 ep_link_ctrlstts_offset = current_offset + 0x10;
3910 ep_dev_ctrl2stts2_offset = current_offset + 0x28;
3911 break;
3912 }
3913 current_offset = (val >> 8) & 0xff;
3914 }
3915
3916 if (!ep_link_cap_offset) {
3917 PCIE_DBG(dev,
3918 "RC%d endpoint does not support PCIe capability registers\n",
3919 dev->rc_idx);
3920 return;
3921 }
3922
3923 PCIE_DBG(dev,
3924 "RC%d: ep_link_cap_offset: 0x%x\n",
3925 dev->rc_idx, ep_link_cap_offset);
3926
3927 if (dev->common_clk_en) {
3928 msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
3929 0, BIT(6));
3930
3931 msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
3932 0, BIT(6));
3933
3934 if (dev->shadow_en) {
3935 dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
3936 readl_relaxed(dev->dm_core +
3937 PCIE20_CAP_LINKCTRLSTATUS);
3938
3939 dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
3940 readl_relaxed(dev->conf +
3941 ep_link_ctrlstts_offset);
3942 }
3943
3944 PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
3945 readl_relaxed(dev->dm_core +
3946 PCIE20_CAP_LINKCTRLSTATUS));
3947 PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
3948 readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
3949 }
3950
3951 if (dev->clk_power_manage_en) {
3952 val = readl_relaxed(dev->conf + ep_link_cap_offset);
3953 if (val & BIT(18)) {
3954 msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
3955 0, BIT(8));
3956
3957 if (dev->shadow_en)
3958 dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
3959 readl_relaxed(dev->conf +
3960 ep_link_ctrlstts_offset);
3961
3962 PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
3963 readl_relaxed(dev->conf +
3964 ep_link_ctrlstts_offset));
3965 }
3966 }
3967
3968 if (dev->l0s_supported) {
3969 msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
3970 0, BIT(0));
3971 msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
3972 0, BIT(0));
3973 if (dev->shadow_en) {
3974 dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
3975 readl_relaxed(dev->dm_core +
3976 PCIE20_CAP_LINKCTRLSTATUS);
3977 dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
3978 readl_relaxed(dev->conf +
3979 ep_link_ctrlstts_offset);
3980 }
3981 PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
3982 readl_relaxed(dev->dm_core +
3983 PCIE20_CAP_LINKCTRLSTATUS));
3984 PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
3985 readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
3986 }
3987
3988 if (dev->l1_supported) {
3989 msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
3990 0, BIT(1));
3991 msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
3992 0, BIT(1));
3993 if (dev->shadow_en) {
3994 dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
3995 readl_relaxed(dev->dm_core +
3996 PCIE20_CAP_LINKCTRLSTATUS);
3997 dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
3998 readl_relaxed(dev->conf +
3999 ep_link_ctrlstts_offset);
4000 }
4001 PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
4002 readl_relaxed(dev->dm_core +
4003 PCIE20_CAP_LINKCTRLSTATUS));
4004 PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
4005 readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
4006 }
4007
4008 if (dev->l1ss_supported) {
4009 current_offset = PCIE_EXT_CAP_OFFSET;
4010 while (current_offset) {
4011 if (msm_pcie_check_align(dev, current_offset))
4012 return;
4013
4014 val = readl_relaxed(dev->conf + current_offset);
4015 if ((val & 0xffff) == L1SUB_CAP_ID) {
4016 ep_l1sub_cap_reg1_offset = current_offset + 0x4;
4017 ep_l1sub_ctrl1_offset = current_offset + 0x8;
4018 break;
4019 }
4020 current_offset = val >> 20;
4021 }
4022 if (!ep_l1sub_ctrl1_offset) {
4023 PCIE_DBG(dev,
4024 "RC%d endpoint does not support l1ss registers\n",
4025 dev->rc_idx);
4026 return;
4027 }
4028
4029 val = readl_relaxed(dev->conf + ep_l1sub_cap_reg1_offset);
4030
4031 PCIE_DBG2(dev, "EP's L1SUB_CAPABILITY_REG_1: 0x%x\n", val);
4032 PCIE_DBG2(dev, "RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
4033 dev->rc_idx, ep_l1sub_ctrl1_offset);
4034
4035 val &= 0xf;
4036
4037 msm_pcie_write_reg_field(dev->dm_core, PCIE20_L1SUB_CONTROL1,
4038 0xf, val);
4039 msm_pcie_write_mask(dev->dm_core +
4040 PCIE20_DEVICE_CONTROL2_STATUS2,
4041 0, BIT(10));
4042 msm_pcie_write_reg_field(dev->conf, ep_l1sub_ctrl1_offset,
4043 0xf, val);
4044 msm_pcie_write_mask(dev->conf + ep_dev_ctrl2stts2_offset,
4045 0, BIT(10));
4046 if (dev->shadow_en) {
4047 dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
4048 readl_relaxed(dev->dm_core +
4049 PCIE20_L1SUB_CONTROL1);
4050 dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
4051 readl_relaxed(dev->dm_core +
4052 PCIE20_DEVICE_CONTROL2_STATUS2);
4053 dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
4054 readl_relaxed(dev->conf +
4055 ep_l1sub_ctrl1_offset);
4056 dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
4057 readl_relaxed(dev->conf +
4058 ep_dev_ctrl2stts2_offset);
4059 }
4060 PCIE_DBG2(dev, "RC's L1SUB_CONTROL1:0x%x\n",
4061 readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1));
4062 PCIE_DBG2(dev, "RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
4063 readl_relaxed(dev->dm_core +
4064 PCIE20_DEVICE_CONTROL2_STATUS2));
4065 PCIE_DBG2(dev, "EP's L1SUB_CONTROL1:0x%x\n",
4066 readl_relaxed(dev->conf + ep_l1sub_ctrl1_offset));
4067 PCIE_DBG2(dev, "EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
4068 readl_relaxed(dev->conf +
4069 ep_dev_ctrl2stts2_offset));
4070 }
4071}
4072
4073void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
4074{
4075 int i;
4076
4077 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4078
4079 /* program MSI controller and enable all interrupts */
4080 writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
4081 writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
4082
4083 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
4084 writel_relaxed(~0, dev->dm_core +
4085 PCIE20_MSI_CTRL_INTR_EN + (i * 12));
4086
4087 /* ensure that hardware is configured before proceeding */
4088 wmb();
4089}
4090
/*
 * msm_pcie_get_resources() - acquire every DT-described resource for one RC.
 *
 * Collects, in order: clock frequencies, regulators, GDSCs, GPIOs, PHY init
 * sequences, clocks, pipe clocks, resets, pipe resets, the bus-scale table,
 * memory-mapped register regions, and IRQ/MSI numbers, then caches the
 * register base pointers in @dev. Uses devm_* APIs, so acquired resources
 * are released automatically by the driver core on failure or removal.
 *
 * Returns 0 on success or a negative errno (including -EPROBE_DEFER when a
 * regulator is not yet available).
 */
static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
					struct platform_device *pdev)
{
	int i, len, cnt, ret = 0, size = 0;
	struct msm_pcie_vreg_info_t *vreg_info;
	struct msm_pcie_gpio_info_t *gpio_info;
	struct msm_pcie_clk_info_t *clk_info;
	struct resource *res;
	struct msm_pcie_res_info_t *res_info;
	struct msm_pcie_irq_info_t *irq_info;
	struct msm_pcie_irq_info_t *msi_info;
	struct msm_pcie_reset_info_t *reset_info;
	struct msm_pcie_reset_info_t *pipe_reset_info;
	char prop_name[MAX_PROP_SIZE];
	const __be32 *prop;
	u32 *clkfreq = NULL;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	/*
	 * Read per-clock max frequencies; the table is indexed later with
	 * pipe clocks first, then the regular clocks (see the clk loops).
	 */
	cnt = of_property_count_strings((&pdev->dev)->of_node,
			"clock-names");
	if (cnt > 0) {
		clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
					sizeof(*clkfreq), GFP_KERNEL);
		if (!clkfreq) {
			PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
					dev->rc_idx);
			return -ENOMEM;
		}
		ret = of_property_read_u32_array(
			(&pdev->dev)->of_node,
			"max-clock-frequency-hz", clkfreq, cnt);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
				dev->rc_idx, ret);
			goto out;
		}
	}

	/* regulators: required ones fail the probe, optional ones are NULLed */
	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		vreg_info = &dev->vreg[i];
		vreg_info->hdl =
				devm_regulator_get(&pdev->dev, vreg_info->name);

		if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
			PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
				vreg_info->name);
			ret = PTR_ERR(vreg_info->hdl);
			goto out;
		}

		if (IS_ERR(vreg_info->hdl)) {
			if (vreg_info->required) {
				PCIE_DBG(dev, "Vreg %s doesn't exist\n",
					vreg_info->name);
				ret = PTR_ERR(vreg_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Optional Vreg %s doesn't exist\n",
					vreg_info->name);
				vreg_info->hdl = NULL;
			}
		} else {
			dev->vreg_n++;
			/* optional per-regulator <max min opt-mode> triple */
			snprintf(prop_name, MAX_PROP_SIZE,
				"qcom,%s-voltage-level", vreg_info->name);
			prop = of_get_property((&pdev->dev)->of_node,
						prop_name, &len);
			if (!prop || (len != (3 * sizeof(__be32)))) {
				PCIE_DBG(dev, "%s %s property\n",
					prop ? "invalid format" :
					"no", prop_name);
			} else {
				vreg_info->max_v = be32_to_cpup(&prop[0]);
				vreg_info->min_v = be32_to_cpup(&prop[1]);
				vreg_info->opt_mode =
					be32_to_cpup(&prop[2]);
			}
		}
	}

	/* the core GDSC is mandatory */
	dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");

	if (IS_ERR(dev->gdsc)) {
		PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
			dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
		if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
			PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
					dev->pdev->name);
		ret = PTR_ERR(dev->gdsc);
		goto out;
	}

	/* the SMMU GDSC is optional */
	dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");

	if (IS_ERR(dev->gdsc_smmu)) {
		PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
			dev->rc_idx);
		dev->gdsc_smmu = NULL;
	}

	dev->gpio_n = 0;
	for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
		gpio_info = &dev->gpio[i];
		ret = of_get_named_gpio((&pdev->dev)->of_node,
					gpio_info->name, 0);
		if (ret >= 0) {
			gpio_info->num = ret;
			dev->gpio_n++;
			PCIE_DBG(dev, "GPIO num for %s is %d\n",
				gpio_info->name, gpio_info->num);
		} else {
			if (gpio_info->required) {
				/* ret carries the negative errno to 'out' */
				PCIE_ERR(dev,
					"Could not get required GPIO %s\n",
					gpio_info->name);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Could not get optional GPIO %s\n",
					gpio_info->name);
			}
		}
		ret = 0;
	}

	/* optional common-PHY init sequence, stored as raw register tuples */
	of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
	if (size) {
		dev->phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->phy_sequence) {
			dev->phy_len =
				size / sizeof(*dev->phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,phy-sequence",
				(unsigned int *)dev->phy_sequence,
				size / sizeof(dev->phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* optional per-port PHY init sequence, same layout as above */
	of_get_property(pdev->dev.of_node, "qcom,port-phy-sequence", &size);
	if (size) {
		dev->port_phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->port_phy_sequence) {
			dev->port_phy_len =
				size / sizeof(*dev->port_phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,port-phy-sequence",
				(unsigned int *)dev->port_phy_sequence,
				size / sizeof(dev->port_phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for port phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: port phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* core clocks; frequencies follow the pipe clocks in clkfreq[] */
	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
		clk_info = &dev->clk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i +
					MSM_PCIE_MAX_PIPE_CLK];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* pipe clocks occupy the first MSM_PCIE_MAX_PIPE_CLK clkfreq slots */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		clk_info = &dev->pipeclk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* reset controllers; optional ones are NULLed and skipped */
	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
		reset_info = &dev->reset[i];

		reset_info->hdl = devm_reset_control_get(&pdev->dev,
						reset_info->name);

		if (IS_ERR(reset_info->hdl)) {
			if (reset_info->required) {
				PCIE_DBG(dev,
					"Reset %s isn't available:%ld\n",
					reset_info->name,
					PTR_ERR(reset_info->hdl));

				ret = PTR_ERR(reset_info->hdl);
				reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Reset %s\n",
					reset_info->name);
				reset_info->hdl = NULL;
			}
		}
	}

	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];

		pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
						pipe_reset_info->name);

		if (IS_ERR(pipe_reset_info->hdl)) {
			if (pipe_reset_info->required) {
				PCIE_DBG(dev,
					"Pipe Reset %s isn't available:%ld\n",
					pipe_reset_info->name,
					PTR_ERR(pipe_reset_info->hdl));

				ret = PTR_ERR(pipe_reset_info->hdl);
				pipe_reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
					pipe_reset_info->name);
				pipe_reset_info->hdl = NULL;
			}
		}
	}

	/* bus-bandwidth voting client; optional if no table is in DT */
	dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (!dev->bus_scale_table) {
		PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
			dev->rc_idx, dev->pdev->name);
		dev->bus_client = 0;
	} else {
		dev->bus_client =
			msm_bus_scale_register_client(dev->bus_scale_table);
		if (!dev->bus_client) {
			PCIE_ERR(dev,
				"PCIe: Failed to register bus client for RC%d (%s)\n",
				dev->rc_idx, dev->pdev->name);
			msm_bus_cl_clear_pdata(dev->bus_scale_table);
			ret = -ENODEV;
			goto out;
		}
	}

	/*
	 * Map register regions. A missing region is only logged here;
	 * required bases are consumed via dev->res[...] below.
	 */
	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
		res_info = &dev->res[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							res_info->name);

		if (!res) {
			PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
				dev->rc_idx, res_info->name);
		} else {
			PCIE_DBG(dev, "start addr for %s is %pa.\n",
				res_info->name,	&res->start);

			res_info->base = devm_ioremap(&pdev->dev,
						res->start, resource_size(res));
			if (!res_info->base) {
				PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
					dev->rc_idx, res_info->name);
				ret = -ENOMEM;
				goto out;
			} else {
				res_info->resource = res;
			}
		}
	}

	/* wired interrupts (link events etc.); missing ones stay zero */
	for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
		irq_info = &dev->irq[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							irq_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, irq_info->name);
		} else {
			irq_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
					irq_info->num);
		}
	}

	/* MSI interrupt lines */
	for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
		msi_info = &dev->msi[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							msi_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, msi_info->name);
		} else {
			msi_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
					msi_info->num);
		}
	}

	/* All allocations succeeded */

	if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
		dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
	else
		dev->wake_n = 0;

	/* cache register bases for quick access by the rest of the driver */
	dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
	dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
	dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
	dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
	dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
	dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
	dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
	dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
	dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
	dev->dev_io_res->flags = IORESOURCE_IO;

out:
	kfree(clkfreq);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
4469
4470static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
4471{
4472 dev->parf = NULL;
4473 dev->elbi = NULL;
4474 dev->dm_core = NULL;
4475 dev->conf = NULL;
4476 dev->bars = NULL;
4477 dev->tcsr = NULL;
4478 dev->dev_mem_res = NULL;
4479 dev->dev_io_res = NULL;
4480}
4481
/*
 * msm_pcie_enable() - power up one root complex and train the link.
 * @dev:     the RC to bring up
 * @options: PM_VREG/PM_CLK/PM_PIPE_CLK bitmask selecting which resource
 *           classes to enable (PM_ALL for a full bring-up)
 *
 * Sequence: assert PERST, enable regulators and clocks, program PARF,
 * initialize the (possibly shared) PHY, release PERST, start LTSSM, and
 * poll for link-up. On success the RC is marked MSM_PCIE_LINK_ENABLED;
 * on failure everything enabled so far is unwound via the goto ladder.
 *
 * Serialized against msm_pcie_disable() by dev->setup_lock; the common
 * PHY refcount (num_rc_on) is protected by com_phy_lock.
 *
 * Returns 0 on success, a negative errno (or -1 for link-training
 * failure) otherwise.
 */
int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
{
	int ret = 0;
	uint32_t val;
	long int retries = 0;
	int link_check_count = 0;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
		PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
			dev->rc_idx);
		goto out;
	}

	/* assert PCIe reset link to keep EP in reset */

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
				 PERST_PROPAGATION_DELAY_US_MAX);

	/* enable power */

	if (options & PM_VREG) {
		ret = msm_pcie_vreg_init(dev);
		if (ret)
			goto out;
	}

	/* enable clocks */
	if (options & PM_CLK) {
		ret = msm_pcie_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto clk_fail;
	}

	/* re-apply secure configuration lost across power collapse */
	if (dev->scm_dev_id) {
		PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
		msm_pcie_restore_sec_config(dev);
	}

	/* enable PCIe clocks and resets */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);

	/* change DBI base address */
	writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
				0, BIT(4));

	/* enable selected IRQ */
	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
		msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);

		msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
					BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
					BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
					BIT(MSM_PCIE_INT_EVT_AER_ERR) |
					BIT(MSM_PCIE_INT_EVT_MSI_0) |
					BIT(MSM_PCIE_INT_EVT_MSI_1) |
					BIT(MSM_PCIE_INT_EVT_MSI_2) |
					BIT(MSM_PCIE_INT_EVT_MSI_3) |
					BIT(MSM_PCIE_INT_EVT_MSI_4) |
					BIT(MSM_PCIE_INT_EVT_MSI_5) |
					BIT(MSM_PCIE_INT_EVT_MSI_6) |
					BIT(MSM_PCIE_INT_EVT_MSI_7));

		PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
	}

	/* size the slave address space to cover the BAR/mem window */
	if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_16M)
		writel_relaxed(SZ_32M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_8M)
		writel_relaxed(SZ_16M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else
		writel_relaxed(SZ_8M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);

	if (dev->use_msi) {
		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
		/* keep the HW default halt size unless DT overrides it */
		val = dev->wr_halt_size ? dev->wr_halt_size :
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
			BIT(31) | val);

		PCIE_DBG(dev,
			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
			dev->rc_idx,
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
	}

	/* the common PHY is initialized only by the first RC to come up */
	mutex_lock(&com_phy_lock);
	/* init PCIe PHY */
	if (!num_rc_on)
		pcie_phy_init(dev);

	num_rc_on++;
	mutex_unlock(&com_phy_lock);

	if (options & PM_PIPE_CLK) {
		usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
					 PHY_STABILIZATION_DELAY_US_MAX);
		/* Enable the pipe clock */
		ret = msm_pcie_pipe_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto link_fail;
	}

	PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);

	do {
		if (pcie_phy_is_ready(dev))
			break;
		retries++;
		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
					 REFCLK_STABILIZATION_DELAY_US_MAX);
	} while (retries < PHY_READY_TIMEOUT_COUNT);

	PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
		dev->rc_idx, retries);

	if (pcie_phy_is_ready(dev))
		PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
	else {
		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
			dev->rc_idx);
		ret = -ENODEV;
		pcie_phy_dump(dev);
		goto link_fail;
	}

	pcie_pcs_port_phy_init(dev);

	/* optional DT-specified settle time (ms) before touching the EP */
	if (dev->ep_latency)
		usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				dev->gpio[MSM_PCIE_GPIO_EP].on);

	/* de-assert PCIe reset link to bring EP out of reset */

	PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);

	/* set max tlp read size */
	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
				0x7000, dev->tlp_rd_size);

	/* enable link training */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));

	PCIE_DBG(dev, "%s", "check if link is up\n");

	/* Wait for up to 100ms for the link to come up */
	do {
		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
		val =  readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
		PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
			dev->rc_idx, (val >> 12) & 0x3f);
	} while ((!(val & XMLH_LINK_UP) ||
		!msm_pcie_confirm_linkup(dev, false, false, NULL))
		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));

	if ((val & XMLH_LINK_UP) &&
		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
		PCIE_DBG(dev, "Link is up after %d checkings\n",
			link_check_count);
		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
	} else {
		/* link failed to train: park the EP back in reset */
		PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
			dev->rc_idx);
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
			dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
			dev->rc_idx);
		ret = -1;
		goto link_fail;
	}

	msm_pcie_config_controller(dev);

	/* only program the local MSI controller when not using GIC MSIs */
	if (!dev->msi_gicm_addr)
		msm_pcie_config_msi_controller(dev);

	msm_pcie_config_link_state(dev);

	dev->link_status = MSM_PCIE_LINK_ENABLED;
	dev->power_on = true;
	dev->suspending = false;
	dev->link_turned_on_counter++;

	goto out;

link_fail:
	/* unwind in reverse order: EP gpio, PHY, pipe clocks, clocks */
	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	msm_pcie_pipe_clk_deinit(dev);
	msm_pcie_clk_deinit(dev);
clk_fail:
	msm_pcie_vreg_deinit(dev);
out:
	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
4728
/*
 * msm_pcie_disable() - power down one root complex.
 * @dev:     the RC to shut down
 * @options: PM_VREG/PM_CLK/PM_PIPE_CLK bitmask selecting which resource
 *           classes to disable
 *
 * Mirrors msm_pcie_enable(): asserts PERST, resets/powers down the
 * per-port PHY (and the common PHY once the last RC goes down), then
 * releases clocks, regulators, and the EP gpio per @options.
 * Serialized by dev->setup_lock; no-op if the RC is already off.
 */
void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
{
	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (!dev->power_on) {
		PCIE_DBG(dev,
			"PCIe: the link of RC%d is already power down.\n",
			dev->rc_idx);
		mutex_unlock(&dev->setup_lock);
		return;
	}

	/* mark the link down first so other paths stop using it */
	dev->link_status = MSM_PCIE_LINK_DISABLED;
	dev->power_on = false;
	dev->link_turned_off_counter++;

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);

	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	/* last RC down also powers off the shared PHY */
	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	if (options & PM_CLK) {
		/* gate the PHY before turning its clocks off */
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
					BIT(0));
		msm_pcie_clk_deinit(dev);
	}

	if (options & PM_VREG)
		msm_pcie_vreg_deinit(dev);

	if (options & PM_PIPE_CLK)
		msm_pcie_pipe_clk_deinit(dev);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
4788
4789static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
4790 struct msm_pcie_device_info *ep_dev_info)
4791{
4792 u32 val;
4793 void __iomem *ep_base = ep_dev_info->conf_base;
4794 u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
4795 0xff;
4796
4797 while (current_offset) {
4798 if (msm_pcie_check_align(dev, current_offset))
4799 return;
4800
4801 val = readl_relaxed(ep_base + current_offset);
4802 if ((val & 0xff) == PCIE20_CAP_ID) {
4803 ep_dev_info->dev_ctrlstts_offset =
4804 current_offset + 0x8;
4805 break;
4806 }
4807 current_offset = (val >> 8) & 0xff;
4808 }
4809
4810 if (!ep_dev_info->dev_ctrlstts_offset) {
4811 PCIE_DBG(dev,
4812 "RC%d endpoint does not support PCIe cap registers\n",
4813 dev->rc_idx);
4814 return;
4815 }
4816
4817 PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
4818 dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
4819
4820 /* Enable AER on EP */
4821 msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
4822 BIT(3)|BIT(2)|BIT(1)|BIT(0));
4823
4824 PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
4825 readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
4826}
4827
/*
 * msm_pcie_config_device_table() - register one PCI device in the driver's
 * device tables and program its iATU window.
 *
 * Called via bus_for_each_dev(); @dev is the candidate device, @pdev is
 * really the owning struct msm_pcie_dev_t. Devices on the root bus (bus 0)
 * are skipped. For a new BDF, this fills both the global msm_pcie_dev_tbl
 * and the per-RC table, programs an iATU config window, enables bus
 * mastering for non-bridge functions, and enables AER on the EP.
 *
 * Returns 0 on success; on a full table it returns the (positive) index,
 * which bus_for_each_dev() treats as an error and stops iteration.
 */
static int msm_pcie_config_device_table(struct device *dev, void *pdev)
{
	struct pci_dev *pcidev = to_pci_dev(dev);
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
	int ret = 0;
	u32 rc_idx = pcie_dev->rc_idx;
	u32 i, index;
	u32 bdf = 0;
	u8 type;
	u32 h_type;
	u32 bme;

	if (!pcidev) {
		PCIE_ERR(pcie_dev,
			"PCIe: Did not find PCI device in list for RC%d.\n",
			pcie_dev->rc_idx);
		return -ENODEV;
	}

	PCIE_DBG(pcie_dev,
		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
		pcidev->vendor, pcidev->device);

	/* the root complex itself (bus 0) needs no table entry */
	if (!pcidev->bus->number)
		return ret;

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
	/* bus 1 is directly attached -> CFG0 cycles; deeper buses -> CFG1 */
	type = pcidev->bus->number == 1 ?
		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
		if (msm_pcie_dev_tbl[i].bdf == bdf &&
			!msm_pcie_dev_tbl[i].dev) {
			/* find the pre-populated per-RC slot for this BDF */
			for (index = 0; index < MAX_DEVICE_NUM; index++) {
				if (dev_table_t[index].bdf == bdf) {
					msm_pcie_dev_tbl[i].dev = pcidev;
					msm_pcie_dev_tbl[i].domain = rc_idx;
					msm_pcie_dev_tbl[i].conf_base =
						pcie_dev->conf + index * SZ_4K;
					msm_pcie_dev_tbl[i].phy_address =
						axi_conf->start + index * SZ_4K;

					dev_table_t[index].dev = pcidev;
					dev_table_t[index].domain = rc_idx;
					dev_table_t[index].conf_base =
						pcie_dev->conf + index * SZ_4K;
					dev_table_t[index].phy_address =
						axi_conf->start + index * SZ_4K;

					/* 4K config window per device */
					msm_pcie_iatu_config(pcie_dev, index,
						type,
						dev_table_t[index].phy_address,
						dev_table_t[index].phy_address
						+ SZ_4K - 1,
						bdf);

					h_type = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_HEADER_TYPE);

					bme = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_COMMAND_STATUS);

					/*
					 * multi-function/bridge header: turn
					 * on memory space + bus mastering;
					 * otherwise count it as an endpoint.
					 */
					if (h_type & (1 << 16)) {
						pci_write_config_dword(pcidev,
							PCIE20_COMMAND_STATUS,
							bme | 0x06);
					} else {
						pcie_dev->num_ep++;
						dev_table_t[index].registered =
							false;
					}

					if (pcie_dev->num_ep > 1)
						pcie_dev->pending_ep_reg = true;

					msm_pcie_config_ep_aer(pcie_dev,
						&dev_table_t[index]);

					break;
				}
			}
			if (index == MAX_DEVICE_NUM) {
				PCIE_ERR(pcie_dev,
					"RC%d PCI device table is full.\n",
					rc_idx);
				ret = index;
			} else {
				break;
			}
		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
			pcidev == msm_pcie_dev_tbl[i].dev) {
			/* already registered on a previous pass */
			break;
		}
	}
	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"Global PCI device table is full: %d elements.\n",
			i);
		PCIE_ERR(pcie_dev,
			"Bus number is 0x%x\nDevice number is 0x%x\n",
			pcidev->bus->number, pcidev->devfn);
		ret = i;
	}
	return ret;
}
4937
/*
 * msm_pcie_configure_sid() - assign an SMMU stream ID to an endpoint.
 * @dev:    the endpoint's struct device
 * @sid:    out - the computed stream ID
 * @domain: out - the owning RC index
 *
 * Walks up to the root bus to find the owning RC, allocates the next
 * short-BDF slot, programs the PARF BDF-translation register for the
 * endpoint's BDF, and records the SID in the per-RC device table.
 * Returns 0 on success or MSM_PCIE_ERROR on any failure (the short-BDF
 * counter is rolled back if the BDF is not found in the table).
 */
int msm_pcie_configure_sid(struct device *dev, u32 *sid, int *domain)
{
	struct pci_dev *pcidev;
	struct msm_pcie_dev_t *pcie_dev;
	struct pci_bus *bus;
	int i;
	u32 bdf;

	if (!dev) {
		pr_err("%s: PCIe: endpoint device passed in is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	pcidev = to_pci_dev(dev);
	if (!pcidev) {
		pr_err("%s: PCIe: PCI device of endpoint is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	bus = pcidev->bus;
	if (!bus) {
		pr_err("%s: PCIe: Bus of PCI device is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	/* climb to the root bus; its sysdata is the owning RC */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	pcie_dev = (struct msm_pcie_dev_t *)(bus->sysdata);
	if (!pcie_dev) {
		pr_err("%s: PCIe: Could not get PCIe structure\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	if (!pcie_dev->smmu_exist) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC:%d: smmu does not exist\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev, "PCIe: RC%d: device address is: %p\n",
		pcie_dev->rc_idx, dev);
	PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device address is: %p\n",
		pcie_dev->rc_idx, pcidev);

	*domain = pcie_dev->rc_idx;

	/* claim the next short-BDF slot (bounded translation table) */
	if (pcie_dev->current_short_bdf < (MAX_SHORT_BDF_NUM - 1)) {
		pcie_dev->current_short_bdf++;
	} else {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: No more short BDF left\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (pcie_dev->pcidev_table[i].bdf == bdf) {
			/* SID = base | (rc_idx << 4) | short bdf */
			*sid = pcie_dev->smmu_sid_base +
				((pcie_dev->rc_idx << 4) |
				pcie_dev->current_short_bdf);

			/* map the full BDF to the short slot in PARF */
			msm_pcie_write_reg(pcie_dev->parf,
				PCIE20_PARF_BDF_TRANSLATE_N +
				pcie_dev->current_short_bdf * 4,
				bdf >> 16);

			pcie_dev->pcidev_table[i].sid = *sid;
			pcie_dev->pcidev_table[i].short_bdf =
				pcie_dev->current_short_bdf;
			break;
		}
	}

	if (i == MAX_DEVICE_NUM) {
		/* roll back the slot claimed above */
		pcie_dev->current_short_bdf--;
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d could not find BDF:%d\n",
			pcie_dev->rc_idx, bdf);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev,
		"PCIe: RC%d: Device: %02x:%02x.%01x received SID %d\n",
		pcie_dev->rc_idx,
		bdf >> 24,
		bdf >> 19 & 0x1f,
		bdf >> 16 & 0x07,
		*sid);

	return 0;
}
EXPORT_SYMBOL(msm_pcie_configure_sid);
5038
5039int msm_pcie_enumerate(u32 rc_idx)
5040{
5041 int ret = 0, bus_ret = 0, scan_ret = 0;
5042 struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
5043
5044 mutex_lock(&dev->enumerate_lock);
5045
5046 PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
5047
5048 if (!dev->drv_ready) {
5049 PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
5050 rc_idx);
5051 ret = -EPROBE_DEFER;
5052 goto out;
5053 }
5054
5055 if (!dev->enumerated) {
5056 ret = msm_pcie_enable(dev, PM_ALL);
5057
5058 /* kick start ARM PCI configuration framework */
5059 if (!ret) {
5060 struct pci_dev *pcidev = NULL;
5061 bool found = false;
5062 struct pci_bus *bus;
5063 resource_size_t iobase = 0;
5064 u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
5065 u32 vendor_id = ids & 0xffff;
5066 u32 device_id = (ids & 0xffff0000) >> 16;
5067 LIST_HEAD(res);
5068
5069 PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
5070 vendor_id, device_id);
5071
5072 ret = of_pci_get_host_bridge_resources(
5073 dev->pdev->dev.of_node,
5074 0, 0xff, &res, &iobase);
5075 if (ret) {
5076 PCIE_ERR(dev,
5077 "PCIe: failed to get host bridge resources for RC%d: %d\n",
5078 dev->rc_idx, ret);
5079 goto out;
5080 }
5081
5082 bus = pci_create_root_bus(&dev->pdev->dev, 0,
5083 &msm_pcie_ops,
5084 msm_pcie_setup_sys_data(dev),
5085 &res);
5086 if (!bus) {
5087 PCIE_ERR(dev,
5088 "PCIe: failed to create root bus for RC%d\n",
5089 dev->rc_idx);
5090 ret = -ENOMEM;
5091 goto out;
5092 }
5093
5094 scan_ret = pci_scan_child_bus(bus);
5095 PCIE_DBG(dev,
5096 "PCIe: RC%d: The max subordinate bus number discovered is %d\n",
5097 dev->rc_idx, ret);
5098
5099 msm_pcie_fixup_irqs(dev);
5100 pci_assign_unassigned_bus_resources(bus);
5101 pci_bus_add_devices(bus);
5102
5103 dev->enumerated = true;
5104
5105 msm_pcie_write_mask(dev->dm_core +
5106 PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
5107
5108 if (dev->cpl_timeout && dev->bridge_found)
5109 msm_pcie_write_reg_field(dev->dm_core,
5110 PCIE20_DEVICE_CONTROL2_STATUS2,
5111 0xf, dev->cpl_timeout);
5112
5113 if (dev->shadow_en) {
5114 u32 val = readl_relaxed(dev->dm_core +
5115 PCIE20_COMMAND_STATUS);
5116 PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
5117 val);
5118 dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
5119 }
5120
5121 do {
5122 pcidev = pci_get_device(vendor_id,
5123 device_id, pcidev);
5124 if (pcidev && (&msm_pcie_dev[rc_idx] ==
5125 (struct msm_pcie_dev_t *)
5126 PCIE_BUS_PRIV_DATA(pcidev->bus))) {
5127 msm_pcie_dev[rc_idx].dev = pcidev;
5128 found = true;
5129 PCIE_DBG(&msm_pcie_dev[rc_idx],
5130 "PCI device is found for RC%d\n",
5131 rc_idx);
5132 }
5133 } while (!found && pcidev);
5134
5135 if (!pcidev) {
5136 PCIE_ERR(dev,
5137 "PCIe: Did not find PCI device for RC%d.\n",
5138 dev->rc_idx);
5139 ret = -ENODEV;
5140 goto out;
5141 }
5142
5143 bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
5144 &msm_pcie_config_device_table);
5145
5146 if (bus_ret) {
5147 PCIE_ERR(dev,
5148 "PCIe: Failed to set up device table for RC%d\n",
5149 dev->rc_idx);
5150 ret = -ENODEV;
5151 goto out;
5152 }
5153 } else {
5154 PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
5155 dev->rc_idx);
5156 }
5157 } else {
5158 PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
5159 dev->rc_idx);
5160 }
5161
5162out:
5163 mutex_unlock(&dev->enumerate_lock);
5164
5165 return ret;
5166}
5167EXPORT_SYMBOL(msm_pcie_enumerate);
5168
/*
 * msm_pcie_notify_client() - deliver @event to the client callback
 * currently installed in dev->event_reg, if the client registered for
 * that event.
 *
 * For a linkdown event where the client registered with
 * MSM_PCIE_CONFIG_NO_RECOVERY, marks dev->user_suspend so the driver
 * leaves link recovery to the client.
 */
static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
					enum msm_pcie_event event)
{
	if (dev->event_reg && dev->event_reg->callback &&
		(dev->event_reg->events & event)) {
		struct msm_pcie_notify *notify = &dev->event_reg->notify;

		notify->event = event;
		notify->user = dev->event_reg->user;
		PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
			dev->rc_idx, event);
		dev->event_reg->callback(notify);

		/* client opted out of driver-side recovery on linkdown */
		if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
				(event == MSM_PCIE_EVENT_LINKDOWN)) {
			dev->user_suspend = true;
			PCIE_DBG(dev,
				"PCIe: Client of RC%d will recover the link later.\n",
				dev->rc_idx);
			return;
		}
	} else {
		PCIE_DBG2(dev,
			"PCIe: Client of RC%d does not have registration for event %d\n",
			dev->rc_idx, event);
	}
}
5196
/*
 * handle_wake_func() - deferred work scheduled from the WAKE# IRQ when
 * the RC has not been enumerated yet.
 *
 * Runs under dev->recovery_lock.  Performs first-time enumeration and,
 * on success, fires the client's LINKUP callback (per-device when
 * num_ep > 1, otherwise the single registration).  If the RC was
 * already enumerated by the time the work runs, it only logs.
 */
static void handle_wake_func(struct work_struct *work)
{
	int i, ret;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
					handle_wake_work);

	PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);

	mutex_lock(&dev->recovery_lock);

	if (!dev->enumerated) {
		PCIE_DBG(dev,
			"PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
			dev->rc_idx);

		ret = msm_pcie_enumerate(dev->rc_idx);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: failed to enable RC%d upon wake request from the device.\n",
				dev->rc_idx);
			goto out;
		}

		if (dev->num_ep > 1) {
			/* notify every endpoint that registered for LINKUP */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg = dev->pcidev_table[i].event_reg;

				if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
					&& dev->event_reg &&
					dev->event_reg->callback &&
					(dev->event_reg->events &
					MSM_PCIE_EVENT_LINKUP)) {
					struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
					notify->event = MSM_PCIE_EVENT_LINKUP;
					notify->user = dev->event_reg->user;
					PCIE_DBG(dev,
						"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
						dev->rc_idx);
					dev->event_reg->callback(notify);
				}
			}
		} else {
			/* single endpoint: one registration to check */
			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
				dev->event_reg && dev->event_reg->callback &&
				(dev->event_reg->events &
				MSM_PCIE_EVENT_LINKUP)) {
				struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
				notify->event = MSM_PCIE_EVENT_LINKUP;
				notify->user = dev->event_reg->user;
				PCIE_DBG(dev,
					"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
					dev->rc_idx);
				dev->event_reg->callback(notify);
			} else {
				PCIE_DBG(dev,
					"PCIe: Client of RC%d does not have registration for linkup event.\n",
					dev->rc_idx);
			}
		}
		goto out;
	} else {
		PCIE_ERR(dev,
			"PCIe: The enumeration for RC%d has already been done.\n",
			dev->rc_idx);
		goto out;
	}

out:
	mutex_unlock(&dev->recovery_lock);
}
5269
/*
 * handle_aer_irq() - AER (Advanced Error Reporting) interrupt handler.
 *
 * Under dev->aer_lock: reads and clears the RC's AER status registers,
 * bumps the fatal/non-fatal/correctable counters, then walks the two
 * halves of the error-source-ID register (correctable then
 * uncorrectable reporter BDFs), matches each against the cached
 * endpoint table, and clears the matching endpoint's AER status the
 * same way.  Bails out early while suspending or when the link is
 * already down.  Always returns IRQ_HANDLED.
 */
static irqreturn_t handle_aer_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
	int ep_corr_val = 0, ep_uncorr_val = 0;
	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
	u32 ep_dev_ctrlstts_offset = 0;
	int i, j, ep_src_bdf = 0;
	void __iomem *ep_base = NULL;
	unsigned long irqsave_flags;

	PCIE_DBG2(dev,
		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
		dev->rc_idx, irq, dev->rc_corr_counter,
		dev->rc_non_fatal_counter, dev->rc_fatal_counter,
		dev->ep_corr_counter, dev->ep_non_fatal_counter,
		dev->ep_fatal_counter);

	spin_lock_irqsave(&dev->aer_lock, irqsave_flags);

	/* registers may already be unclocked during suspend: don't touch */
	if (dev->suspending) {
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently suspending.\n",
			dev->rc_idx);
		spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
		return IRQ_HANDLED;
	}

	uncorr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_UNCORR_ERR_STATUS_REG);
	corr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_CORR_ERR_STATUS_REG);
	rc_err_status = readl_relaxed(dev->dm_core +
				PCIE20_AER_ROOT_ERR_STATUS_REG);
	rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
				PCIE20_CAP_DEVCTRLSTATUS);

	if (uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				uncorr_val);
	/* rate-limit correctable-error logging via corr_counter_limit */
	if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
		PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				corr_val);

	/* device status bits 18/17/16: fatal / non-fatal / correctable */
	if ((rc_dev_ctrlstts >> 18) & 0x1)
		dev->rc_fatal_counter++;
	if ((rc_dev_ctrlstts >> 17) & 0x1)
		dev->rc_non_fatal_counter++;
	if ((rc_dev_ctrlstts >> 16) & 0x1)
		dev->rc_corr_counter++;

	/* W1C: clear the RC's error-detected bits */
	msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(18)|BIT(17)|BIT(16));

	if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
		PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
		goto out;
	}

	/* i == 0: correctable source BDF; i == 1: uncorrectable source BDF */
	for (i = 0; i < 2; i++) {
		if (i)
			ep_src_bdf = readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
		else
			ep_src_bdf = (readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;

		if (!ep_src_bdf)
			continue;

		/* look up the reporting endpoint in the cached device table */
		for (j = 0; j < MAX_DEVICE_NUM; j++) {
			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
				PCIE_DBG2(dev,
					"PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
					i ? "Uncorrectable" : "Correctable",
					dev->pcidev_table[j].bdf >> 24,
					dev->pcidev_table[j].bdf >> 19 & 0x1f,
					dev->pcidev_table[j].bdf >> 16 & 0x07);
				ep_base = dev->pcidev_table[j].conf_base;
				ep_dev_ctrlstts_offset = dev->
					pcidev_table[j].dev_ctrlstts_offset;
				break;
			}
		}

		if (!ep_base) {
			PCIE_ERR(dev,
				"PCIe: RC%d no endpoint found for reported error\n",
				dev->rc_idx);
			goto out;
		}

		ep_uncorr_val = readl_relaxed(ep_base +
					PCIE20_AER_UNCORR_ERR_STATUS_REG);
		ep_corr_val = readl_relaxed(ep_base +
					PCIE20_AER_CORR_ERR_STATUS_REG);
		ep_dev_ctrlstts = readl_relaxed(ep_base +
					ep_dev_ctrlstts_offset);

		if (ep_uncorr_val)
			PCIE_DBG(dev,
				"EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				ep_uncorr_val);
		if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
			PCIE_DBG(dev,
				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				ep_corr_val);

		if ((ep_dev_ctrlstts >> 18) & 0x1)
			dev->ep_fatal_counter++;
		if ((ep_dev_ctrlstts >> 17) & 0x1)
			dev->ep_non_fatal_counter++;
		if ((ep_dev_ctrlstts >> 16) & 0x1)
			dev->ep_corr_counter++;

		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
					BIT(18)|BIT(17)|BIT(16));

		/* clear the endpoint's AER status (write-1-to-clear masks) */
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_UNCORR_ERR_STATUS_REG,
				0x3fff031, 0x3fff031);
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_CORR_ERR_STATUS_REG,
				0xf1c1, 0xf1c1);
	}
out:
	if (((dev->rc_corr_counter < corr_counter_limit) &&
		(dev->ep_corr_counter < corr_counter_limit)) ||
		uncorr_val || ep_uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
				rc_err_status);
	/* clear the RC's AER status so the next error re-triggers the IRQ */
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_UNCORR_ERR_STATUS_REG,
			0x3fff031, 0x3fff031);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_CORR_ERR_STATUS_REG,
			0xf1c1, 0xf1c1);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_ROOT_ERR_STATUS_REG,
			0x7f, 0x7f);

	spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
	return IRQ_HANDLED;
}
5415
/*
 * handle_wake_irq() - PCIE_WAKE_N (endpoint wake) interrupt handler.
 *
 * Under dev->wakeup_lock.  If the RC is not yet enumerated (and wake
 * enumeration is not disabled via boot_option), schedules
 * handle_wake_work to enumerate from process context.  Otherwise it
 * pulses the wakeup source and delivers MSM_PCIE_EVENT_WAKEUP to the
 * registered client(s) (per endpoint when num_ep > 1).
 */
static irqreturn_t handle_wake_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);

	dev->wake_counter++;
	PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
			dev->wake_counter, dev->rc_idx);

	PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
		dev->rc_idx);

	if (!dev->enumerated && !(dev->boot_option &
		MSM_PCIE_NO_WAKE_ENUMERATION)) {
		/* enumeration must happen in process context */
		PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
		schedule_work(&dev->handle_wake_work);
	} else {
		PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
		/* brief wakeup-source pulse to keep the system awake */
		__pm_stay_awake(&dev->ws);
		__pm_relax(&dev->ws);

		if (dev->num_ep > 1) {
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_WAKEUP);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
		}
	}

	spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);

	return IRQ_HANDLED;
}
5456
/*
 * handle_linkdown_irq() - link-down interrupt handler.
 *
 * Under dev->linkdown_lock.  Ignores the event while not enumerated,
 * link already down, or suspending.  Otherwise marks the link
 * disabled, disables register shadowing, optionally panics (debug
 * knob linkdown_panic), asserts PERST to hold the endpoint in reset,
 * and delivers MSM_PCIE_EVENT_LINKDOWN to the registered client(s).
 */
static irqreturn_t handle_linkdown_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);

	dev->linkdown_counter++;

	PCIE_DBG(dev,
		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
		dev->linkdown_counter, dev->rc_idx);

	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG(dev,
			"PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
			dev->rc_idx);
	} else if (dev->suspending) {
		PCIE_DBG(dev,
			"PCIe:the link of RC%d is suspending.\n",
			dev->rc_idx);
	} else {
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		/* shadow contents are stale once the link drops */
		dev->shadow_en = false;

		if (dev->linkdown_panic)
			panic("User has chosen to panic on linkdown\n");

		/* assert PERST */
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

		if (dev->num_ep > 1) {
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_LINKDOWN);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
		}
	}

	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);

	return IRQ_HANDLED;
}
5507
5508static irqreturn_t handle_msi_irq(int irq, void *data)
5509{
5510 int i, j;
5511 unsigned long val;
5512 struct msm_pcie_dev_t *dev = data;
5513 void __iomem *ctrl_status;
5514
5515 PCIE_DUMP(dev, "irq: %d\n", irq);
5516
5517 /*
5518 * check for set bits, clear it by setting that bit
5519 * and trigger corresponding irq
5520 */
5521 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
5522 ctrl_status = dev->dm_core +
5523 PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);
5524
5525 val = readl_relaxed(ctrl_status);
5526 while (val) {
5527 j = find_first_bit(&val, 32);
5528 writel_relaxed(BIT(j), ctrl_status);
5529 /* ensure that interrupt is cleared (acked) */
5530 wmb();
5531 generic_handle_irq(
5532 irq_find_mapping(dev->irq_domain, (j + (32*i)))
5533 );
5534 val = readl_relaxed(ctrl_status);
5535 }
5536 }
5537
5538 return IRQ_HANDLED;
5539}
5540
/*
 * handle_global_irq() - demultiplexer for the PARF global interrupt.
 *
 * Under dev->global_irq_lock: reads the masked status, clears it, and
 * dispatches link-down and AER events to their dedicated handlers.
 * Unknown status bits are only logged.
 */
static irqreturn_t handle_global_irq(int irq, void *data)
{
	int i;
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	u32 status = 0;

	spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);

	/* only service events that are both pending and unmasked */
	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);

	/* W1C ack of everything we are about to handle */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);

	PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
		dev->rc_idx, irq, status);

	/* NOTE(review): '<=' assumes MSM_PCIE_INT_EVT_MAX is an inclusive
	 * bit index — confirm against the enum definition.
	 */
	for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
		if (status & BIT(i)) {
			switch (i) {
			case MSM_PCIE_INT_EVT_LINK_DOWN:
				PCIE_DBG(dev,
					"PCIe: RC%d: handle linkdown event.\n",
					dev->rc_idx);
				handle_linkdown_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_LEGACY:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER legacy event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_ERR:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			default:
				PCIE_DUMP(dev,
					"PCIe: RC%d: Unexpected event %d is caught!\n",
					dev->rc_idx, i);
			}
		}
	}

	spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);

	return IRQ_HANDLED;
}
5591
Tony Truong52122a62017-03-23 18:00:34 -07005592static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
5593 struct pci_dev *pdev)
5594{
5595 struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
5596 int bypass_en = 0;
5597
5598 if (!domain) {
5599 PCIE_DBG(dev,
5600 "PCIe: RC%d: client does not have an iommu domain\n",
5601 dev->rc_idx);
5602 return;
5603 }
5604
5605 iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
5606 if (!bypass_en) {
5607 int ret;
5608 phys_addr_t pcie_base_addr =
5609 dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
5610 dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);
5611
5612 ret = iommu_unmap(domain, iova, PAGE_SIZE);
5613 if (ret != PAGE_SIZE)
5614 PCIE_ERR(dev,
5615 "PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
5616 dev->rc_idx, ret);
5617 }
5618}
5619
/*
 * msm_pcie_destroy_irq() - release one MSI vector previously set up by
 * arch_setup_msi_irq{_qgic,_default}().
 *
 * Recovers the pci_dev and RC from the IRQ's chip data, validates the
 * msi_desc, computes the vector's position in dev->msi_irq_in_use, and
 * clears that bit.  For QGIC-based MSIs, tearing down the last vector
 * of the block also unmaps the QGIC doorbell IOMMU mapping.
 */
void msm_pcie_destroy_irq(unsigned int irq)
{
	int pos;
	struct pci_dev *pdev = irq_get_chip_data(irq);
	struct msi_desc *entry = irq_get_msi_desc(irq);
	struct msi_desc *firstentry;
	struct msm_pcie_dev_t *dev;
	u32 nvec;
	int firstirq;

	if (!pdev) {
		pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
		return;
	}

	dev = PCIE_BUS_PRIV_DATA(pdev->bus);
	if (!dev) {
		pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
		return;
	}

	if (!entry) {
		PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
			dev->rc_idx, irq);
		return;
	}

	firstentry = first_pci_msi_entry(pdev);
	if (!firstentry) {
		PCIE_ERR(dev,
			"PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
			dev->rc_idx, irq);
		return;
	}

	firstirq = firstentry->irq;
	nvec = (1 << entry->msi_attrib.multiple);

	if (dev->msi_gicm_addr) {
		PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);

		/* QGIC vectors are contiguous: [firstirq, firstirq+nvec) */
		if (irq < firstirq || irq > firstirq + nvec - 1) {
			PCIE_ERR(dev,
				"Could not find irq: %d in RC%d MSI table\n",
				irq, dev->rc_idx);
			return;
		}
		/* unmap the doorbell only when the last vector goes away */
		if (irq == firstirq + nvec - 1)
			msm_pcie_unmap_qgic_addr(dev, pdev);
		pos = irq - firstirq;
	} else {
		PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
		/* default MSIs: position relative to hwirq 0 mapping */
		pos = irq - irq_find_mapping(dev->irq_domain, 0);
	}

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
		pos, *dev->msi_irq_in_use);
	clear_bit(pos, dev->msi_irq_in_use);
	PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
		pos, *dev->msi_irq_in_use);
}
5683
/* hookup to linux pci msi framework */
/* arch hook invoked by the PCI core to free one MSI vector. */
void arch_teardown_msi_irq(unsigned int irq)
{
	PCIE_GEN_DBG("irq %d deallocated\n", irq);
	msm_pcie_destroy_irq(irq);
}
5690
5691void arch_teardown_msi_irqs(struct pci_dev *dev)
5692{
5693 struct msi_desc *entry;
5694 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5695
5696 PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
5697 pcie_dev->rc_idx, dev->vendor, dev->device);
5698
5699 pcie_dev->use_msi = false;
5700
5701 list_for_each_entry(entry, &dev->dev.msi_list, list) {
5702 int i, nvec;
5703
5704 if (entry->irq == 0)
5705 continue;
5706 nvec = 1 << entry->msi_attrib.multiple;
5707 for (i = 0; i < nvec; i++)
Tony Truongc3c52ae2017-03-29 12:16:51 -07005708 arch_teardown_msi_irq(entry->irq + i);
Tony Truong349ee492014-10-01 17:35:56 -07005709 }
5710}
5711
/* Intentionally empty ack: MSI status is cleared in handle_msi_irq(). */
static void msm_pcie_msi_nop(struct irq_data *d)
{
}
5715
/* irq_chip for the virtual MSI domain; masking is delegated to the
 * generic MSI mask/unmask helpers.
 */
static struct irq_chip pcie_msi_chip = {
	.name = "msm-pcie-msi",
	.irq_ack = msm_pcie_msi_nop,
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
5724
5725static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
5726{
5727 int irq, pos;
5728
5729 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5730
5731again:
5732 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5733
5734 if (pos >= PCIE_MSI_NR_IRQS)
5735 return -ENOSPC;
5736
5737 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5738
5739 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5740 goto again;
5741 else
5742 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5743
5744 irq = irq_create_mapping(dev->irq_domain, pos);
5745 if (!irq)
5746 return -EINVAL;
5747
5748 return irq;
5749}
5750
/*
 * arch_setup_msi_irq_default() - set up @nvec vectors for @desc using
 * the default PARF-decoded MSI scheme (address MSM_PCIE_MSI_PHY, data
 * is the vector's position in the irq domain).
 *
 * Returns 0 on success or a negative errno from vector allocation.
 */
static int arch_setup_msi_irq_default(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	irq = msm_pcie_create_irq(dev);

	PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);

	if (irq < 0)
		return irq;

	PCIE_DBG(dev, "irq %d allocated\n", irq);

	/* chip data lets msm_pcie_destroy_irq() find the pci_dev later */
	irq_set_chip_data(irq, pdev);
	irq_set_msi_desc(irq, desc);

	/* write msi vector and data */
	msg.address_hi = 0;
	msg.address_lo = MSM_PCIE_MSI_PHY;
	msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
	write_msi_msg(irq, &msg);

	return 0;
}
5780
5781static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
5782{
5783 int irq, pos;
5784
5785 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5786
5787again:
5788 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5789
5790 if (pos >= PCIE_MSI_NR_IRQS)
5791 return -ENOSPC;
5792
5793 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5794
5795 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5796 goto again;
5797 else
5798 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5799
5800 if (pos >= MSM_PCIE_MAX_MSI) {
5801 PCIE_ERR(dev,
5802 "PCIe: RC%d: pos %d is not less than %d\n",
5803 dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
5804 return MSM_PCIE_ERROR;
5805 }
5806
5807 irq = dev->msi[pos].num;
5808 if (!irq) {
5809 PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
5810 dev->rc_idx);
5811 return -EINVAL;
5812 }
5813
5814 return irq;
5815}
5816
/*
 * msm_pcie_map_qgic_addr() - fill @msg with the QGIC doorbell address
 * the endpoint should write MSI payloads to, creating an IOMMU mapping
 * for it when the endpoint sits behind a stage-1 IOMMU.
 *
 * Without a domain, or with stage-1 bypass, the physical
 * dev->msi_gicm_addr is used directly.  Otherwise an IOVA is chosen
 * (the domain's aperture start under fastmap, else the page-rounded
 * PCIe DBI base, which clients cannot legitimately map themselves) and
 * the doorbell page is IOMMU-mapped there.
 *
 * Returns 0 on success, -ENOMEM if iommu_map() fails.
 */
static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
					struct pci_dev *pdev,
					struct msi_msg *msg)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
	struct iommu_domain_geometry geometry;
	int ret, fastmap_en = 0, bypass_en = 0;
	dma_addr_t iova;
	phys_addr_t gicm_db_offset;

	/* default: physical doorbell address (no translation needed) */
	msg->address_hi = 0;
	msg->address_lo = dev->msi_gicm_addr;

	if (!domain) {
		PCIE_DBG(dev,
			"PCIe: RC%d: client does not have an iommu domain\n",
			dev->rc_idx);
		return 0;
	}

	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);

	PCIE_DBG(dev,
		"PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n",
		dev->rc_idx, bypass_en ? "bypass" : "enabled",
		pdev->bus->number, pdev->devfn);

	if (bypass_en)
		return 0;

	iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap_en);
	if (fastmap_en) {
		iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geometry);
		iova = geometry.aperture_start;
		PCIE_DBG(dev,
			"PCIe: RC%d: Use client's IOVA 0x%llx to map QGIC MSI address\n",
			dev->rc_idx, iova);
	} else {
		phys_addr_t pcie_base_addr;

		/*
		 * Use PCIe DBI address as the IOVA since client cannot
		 * use this address for their IOMMU mapping. This will
		 * prevent any conflicts between PCIe host and
		 * client's mapping.
		 */
		pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
		iova = rounddown(pcie_base_addr, PAGE_SIZE);
	}

	ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
	if (ret < 0) {
		PCIE_ERR(dev,
			"PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
			dev->rc_idx, ret);
		return -ENOMEM;
	}

	/* keep the doorbell's offset within its page */
	gicm_db_offset = dev->msi_gicm_addr -
		rounddown(dev->msi_gicm_addr, PAGE_SIZE);
	msg->address_lo = iova + gicm_db_offset;

	return 0;
}
5882
/*
 * arch_setup_msi_irq_qgic() - set up @nvec QGIC-routed MSI vectors for
 * @desc.  Allocates each vector, then programs the MSI message on the
 * first IRQ of the block: address from msm_pcie_map_qgic_addr(), data
 * derived from the vector's offset into the QGIC MSI range.
 *
 * Returns 0 on success or a negative errno.
 * NOTE(review): the error paths (allocation failure mid-loop, mapping
 * failure) return without releasing already-allocated vectors —
 * presumably acceptable because callers treat it as fatal; verify.
 */
static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq, index, ret, firstirq = 0;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	for (index = 0; index < nvec; index++) {
		irq = msm_pcie_create_irq_qgic(dev);
		PCIE_DBG(dev, "irq %d is allocated\n", irq);

		if (irq < 0)
			return irq;

		if (index == 0)
			firstirq = irq;

		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
		/* chip data lets msm_pcie_destroy_irq() find the pci_dev */
		irq_set_chip_data(irq, pdev);
	}

	/* write msi vector and data */
	irq_set_msi_desc(firstirq, desc);

	ret = msm_pcie_map_qgic_addr(dev, pdev, &msg);
	if (ret)
		return ret;

	msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
	write_msi_msg(firstirq, &msg);

	return 0;
}
5918
5919int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
5920{
5921 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5922
5923 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5924
5925 if (dev->msi_gicm_addr)
5926 return arch_setup_msi_irq_qgic(pdev, desc, 1);
5927 else
5928 return arch_setup_msi_irq_default(pdev, desc, 1);
5929}
5930
5931static int msm_pcie_get_msi_multiple(int nvec)
5932{
5933 int msi_multiple = 0;
5934
5935 while (nvec) {
5936 nvec = nvec >> 1;
5937 msi_multiple++;
5938 }
5939 PCIE_GEN_DBG("log2 number of MSI multiple:%d\n",
5940 msi_multiple - 1);
5941
5942 return msi_multiple - 1;
5943}
5944
/*
 * arch_setup_msi_irqs() - arch hook to set up @nvec MSI vectors for
 * @dev.  Only plain MSI with up to 32 vectors is supported; each
 * msi_desc gets its 'multiple' field set and is routed to the QGIC or
 * default scheme.  Marks the RC as MSI-capable on success.
 *
 * Returns 0 on success, -ENOSPC for unsupported requests, or a
 * negative errno from vector setup.
 */
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);

	/* MSI-X and >32 vectors are not supported by this driver */
	if (type != PCI_CAP_ID_MSI || nvec > 32)
		return -ENOSPC;

	PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);

	list_for_each_entry(entry, &dev->dev.msi_list, list) {
		entry->msi_attrib.multiple =
				msm_pcie_get_msi_multiple(nvec);

		if (pcie_dev->msi_gicm_addr)
			ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
		else
			ret = arch_setup_msi_irq_default(dev, entry, nvec);

		PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);

		if (ret < 0)
			return ret;
		/* positive return means fewer vectors available than asked */
		if (ret > 0)
			return -ENOSPC;
	}

	pcie_dev->use_msi = true;

	return 0;
}
5979
5980static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
5981 irq_hw_number_t hwirq)
5982{
5983 irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
Tony Truong349ee492014-10-01 17:35:56 -07005984 return 0;
5985}
5986
/* Linear MSI irq_domain ops: only .map is needed. */
static const struct irq_domain_ops msm_pcie_msi_ops = {
	.map = msm_pcie_msi_map,
};
5990
5991int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
5992{
5993 int rc;
5994 int msi_start = 0;
5995 struct device *pdev = &dev->pdev->dev;
5996
5997 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5998
5999 if (dev->rc_idx)
6000 wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
6001 else
6002 wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
6003
6004 /* register handler for linkdown interrupt */
6005 if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
6006 rc = devm_request_irq(pdev,
6007 dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
6008 handle_linkdown_irq,
6009 IRQF_TRIGGER_RISING,
6010 dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
6011 dev);
6012 if (rc) {
6013 PCIE_ERR(dev,
6014 "PCIe: Unable to request linkdown interrupt:%d\n",
6015 dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
6016 return rc;
6017 }
6018 }
6019
6020 /* register handler for physical MSI interrupt line */
6021 if (dev->irq[MSM_PCIE_INT_MSI].num) {
6022 rc = devm_request_irq(pdev,
6023 dev->irq[MSM_PCIE_INT_MSI].num,
6024 handle_msi_irq,
6025 IRQF_TRIGGER_RISING,
6026 dev->irq[MSM_PCIE_INT_MSI].name,
6027 dev);
6028 if (rc) {
6029 PCIE_ERR(dev,
6030 "PCIe: RC%d: Unable to request MSI interrupt\n",
6031 dev->rc_idx);
6032 return rc;
6033 }
6034 }
6035
6036 /* register handler for AER interrupt */
6037 if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
6038 rc = devm_request_irq(pdev,
6039 dev->irq[MSM_PCIE_INT_PLS_ERR].num,
6040 handle_aer_irq,
6041 IRQF_TRIGGER_RISING,
6042 dev->irq[MSM_PCIE_INT_PLS_ERR].name,
6043 dev);
6044 if (rc) {
6045 PCIE_ERR(dev,
6046 "PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
6047 dev->rc_idx,
6048 dev->irq[MSM_PCIE_INT_PLS_ERR].num);
6049 return rc;
6050 }
6051 }
6052
6053 /* register handler for AER legacy interrupt */
6054 if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
6055 rc = devm_request_irq(pdev,
6056 dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
6057 handle_aer_irq,
6058 IRQF_TRIGGER_RISING,
6059 dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
6060 dev);
6061 if (rc) {
6062 PCIE_ERR(dev,
6063 "PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
6064 dev->rc_idx,
6065 dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
6066 return rc;
6067 }
6068 }
6069
6070 if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
6071 rc = devm_request_irq(pdev,
6072 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
6073 handle_global_irq,
6074 IRQF_TRIGGER_RISING,
6075 dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
6076 dev);
6077 if (rc) {
6078 PCIE_ERR(dev,
6079 "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
6080 dev->rc_idx,
6081 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
6082 return rc;
6083 }
6084 }
6085
6086 /* register handler for PCIE_WAKE_N interrupt line */
6087 if (dev->wake_n) {
6088 rc = devm_request_irq(pdev,
6089 dev->wake_n, handle_wake_irq,
6090 IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
6091 if (rc) {
6092 PCIE_ERR(dev,
6093 "PCIe: RC%d: Unable to request wake interrupt\n",
6094 dev->rc_idx);
6095 return rc;
6096 }
6097
6098 INIT_WORK(&dev->handle_wake_work, handle_wake_func);
6099
6100 rc = enable_irq_wake(dev->wake_n);
6101 if (rc) {
6102 PCIE_ERR(dev,
6103 "PCIe: RC%d: Unable to enable wake interrupt\n",
6104 dev->rc_idx);
6105 return rc;
6106 }
6107 }
6108
6109 /* Create a virtual domain of interrupts */
6110 if (!dev->msi_gicm_addr) {
6111 dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
6112 PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
6113
6114 if (!dev->irq_domain) {
6115 PCIE_ERR(dev,
6116 "PCIe: RC%d: Unable to initialize irq domain\n",
6117 dev->rc_idx);
6118
6119 if (dev->wake_n)
6120 disable_irq(dev->wake_n);
6121
6122 return PTR_ERR(dev->irq_domain);
6123 }
6124
6125 msi_start = irq_create_mapping(dev->irq_domain, 0);
6126 }
6127
6128 return 0;
6129}
6130
/*
 * msm_pcie_irq_deinit() - undo msm_pcie_irq_init(): drop the wakeup
 * source and quiesce the WAKE# IRQ.  The devm-requested IRQ lines are
 * released automatically by the driver core.
 */
void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	wakeup_source_trash(&dev->ws);

	if (dev->wake_n)
		disable_irq(dev->wake_n);
}
6140
6141
6142static int msm_pcie_probe(struct platform_device *pdev)
6143{
6144 int ret = 0;
6145 int rc_idx = -1;
6146 int i, j;
6147
6148 PCIE_GEN_DBG("%s\n", __func__);
6149
6150 mutex_lock(&pcie_drv.drv_lock);
6151
6152 ret = of_property_read_u32((&pdev->dev)->of_node,
6153 "cell-index", &rc_idx);
6154 if (ret) {
6155 PCIE_GEN_DBG("Did not find RC index.\n");
6156 goto out;
6157 } else {
6158 if (rc_idx >= MAX_RC_NUM) {
6159 pr_err(
6160 "PCIe: Invalid RC Index %d (max supported = %d)\n",
6161 rc_idx, MAX_RC_NUM);
6162 goto out;
6163 }
6164 pcie_drv.rc_num++;
6165 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
6166 rc_idx);
6167 }
6168
6169 msm_pcie_dev[rc_idx].l0s_supported =
6170 of_property_read_bool((&pdev->dev)->of_node,
6171 "qcom,l0s-supported");
6172 PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
6173 msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
6174 msm_pcie_dev[rc_idx].l1_supported =
6175 of_property_read_bool((&pdev->dev)->of_node,
6176 "qcom,l1-supported");
6177 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
6178 msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
6179 msm_pcie_dev[rc_idx].l1ss_supported =
6180 of_property_read_bool((&pdev->dev)->of_node,
6181 "qcom,l1ss-supported");
6182 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
6183 msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
6184 msm_pcie_dev[rc_idx].common_clk_en =
6185 of_property_read_bool((&pdev->dev)->of_node,
6186 "qcom,common-clk-en");
6187 PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
6188 msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
6189 msm_pcie_dev[rc_idx].clk_power_manage_en =
6190 of_property_read_bool((&pdev->dev)->of_node,
6191 "qcom,clk-power-manage-en");
6192 PCIE_DBG(&msm_pcie_dev[rc_idx],
6193 "Clock power management is %s enabled.\n",
6194 msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
6195 msm_pcie_dev[rc_idx].aux_clk_sync =
6196 of_property_read_bool((&pdev->dev)->of_node,
6197 "qcom,aux-clk-sync");
6198 PCIE_DBG(&msm_pcie_dev[rc_idx],
6199 "AUX clock is %s synchronous to Core clock.\n",
6200 msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
6201
6202 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
6203 of_property_read_bool((&pdev->dev)->of_node,
6204 "qcom,use-19p2mhz-aux-clk");
6205 PCIE_DBG(&msm_pcie_dev[rc_idx],
6206 "AUX clock frequency is %s 19.2MHz.\n",
6207 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
6208
6209 msm_pcie_dev[rc_idx].smmu_exist =
6210 of_property_read_bool((&pdev->dev)->of_node,
6211 "qcom,smmu-exist");
6212 PCIE_DBG(&msm_pcie_dev[rc_idx],
6213 "SMMU does %s exist.\n",
6214 msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
6215
6216 msm_pcie_dev[rc_idx].smmu_sid_base = 0;
6217 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
6218 &msm_pcie_dev[rc_idx].smmu_sid_base);
6219 if (ret)
6220 PCIE_DBG(&msm_pcie_dev[rc_idx],
6221 "RC%d SMMU sid base not found\n",
6222 msm_pcie_dev[rc_idx].rc_idx);
6223 else
6224 PCIE_DBG(&msm_pcie_dev[rc_idx],
6225 "RC%d: qcom,smmu-sid-base: 0x%x.\n",
6226 msm_pcie_dev[rc_idx].rc_idx,
6227 msm_pcie_dev[rc_idx].smmu_sid_base);
6228
Tony Truong9f2c7722017-02-28 15:02:27 -08006229 msm_pcie_dev[rc_idx].boot_option = 0;
6230 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,boot-option",
6231 &msm_pcie_dev[rc_idx].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07006232 PCIE_DBG(&msm_pcie_dev[rc_idx],
Tony Truong9f2c7722017-02-28 15:02:27 -08006233 "PCIe: RC%d boot option is 0x%x.\n",
6234 rc_idx, msm_pcie_dev[rc_idx].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07006235
6236 msm_pcie_dev[rc_idx].phy_ver = 1;
6237 ret = of_property_read_u32((&pdev->dev)->of_node,
6238 "qcom,pcie-phy-ver",
6239 &msm_pcie_dev[rc_idx].phy_ver);
6240 if (ret)
6241 PCIE_DBG(&msm_pcie_dev[rc_idx],
6242 "RC%d: pcie-phy-ver does not exist.\n",
6243 msm_pcie_dev[rc_idx].rc_idx);
6244 else
6245 PCIE_DBG(&msm_pcie_dev[rc_idx],
6246 "RC%d: pcie-phy-ver: %d.\n",
6247 msm_pcie_dev[rc_idx].rc_idx,
6248 msm_pcie_dev[rc_idx].phy_ver);
6249
6250 msm_pcie_dev[rc_idx].n_fts = 0;
6251 ret = of_property_read_u32((&pdev->dev)->of_node,
6252 "qcom,n-fts",
6253 &msm_pcie_dev[rc_idx].n_fts);
6254
6255 if (ret)
6256 PCIE_DBG(&msm_pcie_dev[rc_idx],
6257 "n-fts does not exist. ret=%d\n", ret);
6258 else
6259 PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
6260 msm_pcie_dev[rc_idx].n_fts);
6261
6262 msm_pcie_dev[rc_idx].common_phy =
6263 of_property_read_bool((&pdev->dev)->of_node,
6264 "qcom,common-phy");
6265 PCIE_DBG(&msm_pcie_dev[rc_idx],
6266 "PCIe: RC%d: Common PHY does %s exist.\n",
6267 rc_idx, msm_pcie_dev[rc_idx].common_phy ? "" : "not");
6268
6269 msm_pcie_dev[rc_idx].ext_ref_clk =
6270 of_property_read_bool((&pdev->dev)->of_node,
6271 "qcom,ext-ref-clk");
6272 PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
6273 msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
6274
6275 msm_pcie_dev[rc_idx].ep_latency = 0;
6276 ret = of_property_read_u32((&pdev->dev)->of_node,
6277 "qcom,ep-latency",
6278 &msm_pcie_dev[rc_idx].ep_latency);
6279 if (ret)
6280 PCIE_DBG(&msm_pcie_dev[rc_idx],
6281 "RC%d: ep-latency does not exist.\n",
6282 rc_idx);
6283 else
6284 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
6285 rc_idx, msm_pcie_dev[rc_idx].ep_latency);
6286
6287 msm_pcie_dev[rc_idx].wr_halt_size = 0;
6288 ret = of_property_read_u32(pdev->dev.of_node,
6289 "qcom,wr-halt-size",
6290 &msm_pcie_dev[rc_idx].wr_halt_size);
6291 if (ret)
6292 PCIE_DBG(&msm_pcie_dev[rc_idx],
6293 "RC%d: wr-halt-size not specified in dt. Use default value.\n",
6294 rc_idx);
6295 else
6296 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
6297 rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
6298
6299 msm_pcie_dev[rc_idx].cpl_timeout = 0;
6300 ret = of_property_read_u32((&pdev->dev)->of_node,
6301 "qcom,cpl-timeout",
6302 &msm_pcie_dev[rc_idx].cpl_timeout);
6303 if (ret)
6304 PCIE_DBG(&msm_pcie_dev[rc_idx],
6305 "RC%d: Using default cpl-timeout.\n",
6306 rc_idx);
6307 else
6308 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
6309 rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
6310
6311 msm_pcie_dev[rc_idx].perst_delay_us_min =
6312 PERST_PROPAGATION_DELAY_US_MIN;
6313 ret = of_property_read_u32(pdev->dev.of_node,
6314 "qcom,perst-delay-us-min",
6315 &msm_pcie_dev[rc_idx].perst_delay_us_min);
6316 if (ret)
6317 PCIE_DBG(&msm_pcie_dev[rc_idx],
6318 "RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
6319 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
6320 else
6321 PCIE_DBG(&msm_pcie_dev[rc_idx],
6322 "RC%d: perst-delay-us-min: %dus.\n",
6323 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
6324
6325 msm_pcie_dev[rc_idx].perst_delay_us_max =
6326 PERST_PROPAGATION_DELAY_US_MAX;
6327 ret = of_property_read_u32(pdev->dev.of_node,
6328 "qcom,perst-delay-us-max",
6329 &msm_pcie_dev[rc_idx].perst_delay_us_max);
6330 if (ret)
6331 PCIE_DBG(&msm_pcie_dev[rc_idx],
6332 "RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
6333 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
6334 else
6335 PCIE_DBG(&msm_pcie_dev[rc_idx],
6336 "RC%d: perst-delay-us-max: %dus.\n",
6337 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
6338
6339 msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
6340 ret = of_property_read_u32(pdev->dev.of_node,
6341 "qcom,tlp-rd-size",
6342 &msm_pcie_dev[rc_idx].tlp_rd_size);
6343 if (ret)
6344 PCIE_DBG(&msm_pcie_dev[rc_idx],
6345 "RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
6346 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
6347 else
6348 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
6349 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
6350
6351 msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
6352 msm_pcie_dev[rc_idx].msi_gicm_base = 0;
6353 ret = of_property_read_u32((&pdev->dev)->of_node,
6354 "qcom,msi-gicm-addr",
6355 &msm_pcie_dev[rc_idx].msi_gicm_addr);
6356
6357 if (ret) {
6358 PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
6359 "msi-gicm-addr does not exist.\n");
6360 } else {
6361 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
6362 msm_pcie_dev[rc_idx].msi_gicm_addr);
6363
6364 ret = of_property_read_u32((&pdev->dev)->of_node,
6365 "qcom,msi-gicm-base",
6366 &msm_pcie_dev[rc_idx].msi_gicm_base);
6367
6368 if (ret) {
6369 PCIE_ERR(&msm_pcie_dev[rc_idx],
6370 "PCIe: RC%d: msi-gicm-base does not exist.\n",
6371 rc_idx);
6372 goto decrease_rc_num;
6373 } else {
6374 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
6375 msm_pcie_dev[rc_idx].msi_gicm_base);
6376 }
6377 }
6378
6379 msm_pcie_dev[rc_idx].scm_dev_id = 0;
6380 ret = of_property_read_u32((&pdev->dev)->of_node,
6381 "qcom,scm-dev-id",
6382 &msm_pcie_dev[rc_idx].scm_dev_id);
6383
6384 msm_pcie_dev[rc_idx].rc_idx = rc_idx;
6385 msm_pcie_dev[rc_idx].pdev = pdev;
6386 msm_pcie_dev[rc_idx].vreg_n = 0;
6387 msm_pcie_dev[rc_idx].gpio_n = 0;
6388 msm_pcie_dev[rc_idx].parf_deemph = 0;
6389 msm_pcie_dev[rc_idx].parf_swing = 0;
6390 msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
6391 msm_pcie_dev[rc_idx].user_suspend = false;
6392 msm_pcie_dev[rc_idx].disable_pc = false;
6393 msm_pcie_dev[rc_idx].saved_state = NULL;
6394 msm_pcie_dev[rc_idx].enumerated = false;
6395 msm_pcie_dev[rc_idx].num_active_ep = 0;
6396 msm_pcie_dev[rc_idx].num_ep = 0;
6397 msm_pcie_dev[rc_idx].pending_ep_reg = false;
6398 msm_pcie_dev[rc_idx].phy_len = 0;
6399 msm_pcie_dev[rc_idx].port_phy_len = 0;
6400 msm_pcie_dev[rc_idx].phy_sequence = NULL;
6401 msm_pcie_dev[rc_idx].port_phy_sequence = NULL;
6402 msm_pcie_dev[rc_idx].event_reg = NULL;
6403 msm_pcie_dev[rc_idx].linkdown_counter = 0;
6404 msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
6405 msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
6406 msm_pcie_dev[rc_idx].rc_corr_counter = 0;
6407 msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
6408 msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
6409 msm_pcie_dev[rc_idx].ep_corr_counter = 0;
6410 msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
6411 msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
6412 msm_pcie_dev[rc_idx].suspending = false;
6413 msm_pcie_dev[rc_idx].wake_counter = 0;
6414 msm_pcie_dev[rc_idx].aer_enable = true;
6415 msm_pcie_dev[rc_idx].power_on = false;
6416 msm_pcie_dev[rc_idx].current_short_bdf = 0;
6417 msm_pcie_dev[rc_idx].use_msi = false;
6418 msm_pcie_dev[rc_idx].use_pinctrl = false;
6419 msm_pcie_dev[rc_idx].linkdown_panic = false;
6420 msm_pcie_dev[rc_idx].bridge_found = false;
6421 memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
6422 sizeof(msm_pcie_vreg_info));
6423 memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
6424 sizeof(msm_pcie_gpio_info));
6425 memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
6426 sizeof(msm_pcie_clk_info[rc_idx]));
6427 memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
6428 sizeof(msm_pcie_pipe_clk_info[rc_idx]));
6429 memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
6430 sizeof(msm_pcie_res_info));
6431 memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
6432 sizeof(msm_pcie_irq_info));
6433 memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
6434 sizeof(msm_pcie_msi_info));
6435 memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
6436 sizeof(msm_pcie_reset_info[rc_idx]));
6437 memcpy(msm_pcie_dev[rc_idx].pipe_reset,
6438 msm_pcie_pipe_reset_info[rc_idx],
6439 sizeof(msm_pcie_pipe_reset_info[rc_idx]));
6440 msm_pcie_dev[rc_idx].shadow_en = true;
6441 for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
6442 msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
6443 for (i = 0; i < MAX_DEVICE_NUM; i++)
6444 for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
6445 msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
6446 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6447 msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
6448 msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
6449 msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
6450 msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
6451 msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
6452 msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = 0;
6453 msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
6454 msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
6455 msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
6456 msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
6457 }
6458
Tony Truongbd9a3412017-02-27 18:30:13 -08006459 dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
Tony Truongbd9a3412017-02-27 18:30:13 -08006460
Tony Truong349ee492014-10-01 17:35:56 -07006461 ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
6462 msm_pcie_dev[rc_idx].pdev);
6463
6464 if (ret)
6465 goto decrease_rc_num;
6466
6467 msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
6468 if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
6469 PCIE_ERR(&msm_pcie_dev[rc_idx],
6470 "PCIe: RC%d failed to get pinctrl\n",
6471 rc_idx);
6472 else
6473 msm_pcie_dev[rc_idx].use_pinctrl = true;
6474
6475 if (msm_pcie_dev[rc_idx].use_pinctrl) {
6476 msm_pcie_dev[rc_idx].pins_default =
6477 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6478 "default");
6479 if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
6480 PCIE_ERR(&msm_pcie_dev[rc_idx],
6481 "PCIe: RC%d could not get pinctrl default state\n",
6482 rc_idx);
6483 msm_pcie_dev[rc_idx].pins_default = NULL;
6484 }
6485
6486 msm_pcie_dev[rc_idx].pins_sleep =
6487 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6488 "sleep");
6489 if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
6490 PCIE_ERR(&msm_pcie_dev[rc_idx],
6491 "PCIe: RC%d could not get pinctrl sleep state\n",
6492 rc_idx);
6493 msm_pcie_dev[rc_idx].pins_sleep = NULL;
6494 }
6495 }
6496
6497 ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
6498 if (ret) {
6499 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6500 goto decrease_rc_num;
6501 }
6502
6503 ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
6504 if (ret) {
6505 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6506 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6507 goto decrease_rc_num;
6508 }
6509
Tony Truong14a5ddf2017-04-20 11:04:03 -07006510 msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
6511
Tony Truong349ee492014-10-01 17:35:56 -07006512 msm_pcie_dev[rc_idx].drv_ready = true;
6513
Tony Truong9f2c7722017-02-28 15:02:27 -08006514 if (msm_pcie_dev[rc_idx].boot_option &
6515 MSM_PCIE_NO_PROBE_ENUMERATION) {
Tony Truong349ee492014-10-01 17:35:56 -07006516 PCIE_DBG(&msm_pcie_dev[rc_idx],
Tony Truong9f2c7722017-02-28 15:02:27 -08006517 "PCIe: RC%d will be enumerated by client or endpoint.\n",
Tony Truong349ee492014-10-01 17:35:56 -07006518 rc_idx);
6519 mutex_unlock(&pcie_drv.drv_lock);
6520 return 0;
6521 }
6522
6523 ret = msm_pcie_enumerate(rc_idx);
6524
6525 if (ret)
6526 PCIE_ERR(&msm_pcie_dev[rc_idx],
6527 "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
6528 rc_idx);
6529 else
6530 PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n",
6531 rc_idx);
6532
6533 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n",
6534 dev_name(&(pdev->dev)));
6535
6536 mutex_unlock(&pcie_drv.drv_lock);
6537 return 0;
6538
6539decrease_rc_num:
6540 pcie_drv.rc_num--;
6541out:
6542 if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
6543 pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
6544 rc_idx);
6545 else
6546 PCIE_ERR(&msm_pcie_dev[rc_idx],
6547 "PCIe: Driver probe failed for RC%d:%d\n",
6548 rc_idx, ret);
6549
6550 mutex_unlock(&pcie_drv.drv_lock);
6551
6552 return ret;
6553}
6554
6555static int msm_pcie_remove(struct platform_device *pdev)
6556{
6557 int ret = 0;
6558 int rc_idx;
6559
6560 PCIE_GEN_DBG("PCIe:%s.\n", __func__);
6561
6562 mutex_lock(&pcie_drv.drv_lock);
6563
6564 ret = of_property_read_u32((&pdev->dev)->of_node,
6565 "cell-index", &rc_idx);
6566 if (ret) {
6567 pr_err("%s: Did not find RC index.\n", __func__);
6568 goto out;
6569 } else {
6570 pcie_drv.rc_num--;
6571 PCIE_GEN_DBG("%s: RC index is 0x%x.", __func__, rc_idx);
6572 }
6573
6574 msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
6575 msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
6576 msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
6577 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6578 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6579
6580out:
6581 mutex_unlock(&pcie_drv.drv_lock);
6582
6583 return ret;
6584}
6585
/* Devicetree match table: binds this driver to "qcom,pci-msm" nodes. */
static const struct of_device_id msm_pcie_match[] = {
	{	.compatible = "qcom,pci-msm",
	},
	{}
};
6591
/* Platform driver descriptor; registered from pcie_init(). */
static struct platform_driver msm_pcie_driver = {
	.probe	= msm_pcie_probe,
	.remove	= msm_pcie_remove,
	.driver	= {
		.name		= "pci-msm",
		.owner		= THIS_MODULE,
		.of_match_table	= msm_pcie_match,
	},
};
6601
6602int __init pcie_init(void)
6603{
6604 int ret = 0, i;
6605 char rc_name[MAX_RC_NAME_LEN];
6606
6607 pr_alert("pcie:%s.\n", __func__);
6608
6609 pcie_drv.rc_num = 0;
6610 mutex_init(&pcie_drv.drv_lock);
6611 mutex_init(&com_phy_lock);
6612
6613 for (i = 0; i < MAX_RC_NUM; i++) {
6614 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
6615 msm_pcie_dev[i].ipc_log =
6616 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6617 if (msm_pcie_dev[i].ipc_log == NULL)
6618 pr_err("%s: unable to create IPC log context for %s\n",
6619 __func__, rc_name);
6620 else
6621 PCIE_DBG(&msm_pcie_dev[i],
6622 "PCIe IPC logging is enable for RC%d\n",
6623 i);
6624 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
6625 msm_pcie_dev[i].ipc_log_long =
6626 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6627 if (msm_pcie_dev[i].ipc_log_long == NULL)
6628 pr_err("%s: unable to create IPC log context for %s\n",
6629 __func__, rc_name);
6630 else
6631 PCIE_DBG(&msm_pcie_dev[i],
6632 "PCIe IPC logging %s is enable for RC%d\n",
6633 rc_name, i);
6634 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
6635 msm_pcie_dev[i].ipc_log_dump =
6636 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6637 if (msm_pcie_dev[i].ipc_log_dump == NULL)
6638 pr_err("%s: unable to create IPC log context for %s\n",
6639 __func__, rc_name);
6640 else
6641 PCIE_DBG(&msm_pcie_dev[i],
6642 "PCIe IPC logging %s is enable for RC%d\n",
6643 rc_name, i);
6644 spin_lock_init(&msm_pcie_dev[i].cfg_lock);
6645 msm_pcie_dev[i].cfg_access = true;
6646 mutex_init(&msm_pcie_dev[i].enumerate_lock);
6647 mutex_init(&msm_pcie_dev[i].setup_lock);
6648 mutex_init(&msm_pcie_dev[i].recovery_lock);
6649 spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
6650 spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
6651 spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
6652 spin_lock_init(&msm_pcie_dev[i].aer_lock);
6653 msm_pcie_dev[i].drv_ready = false;
6654 }
6655 for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
6656 msm_pcie_dev_tbl[i].bdf = 0;
6657 msm_pcie_dev_tbl[i].dev = NULL;
6658 msm_pcie_dev_tbl[i].short_bdf = 0;
6659 msm_pcie_dev_tbl[i].sid = 0;
6660 msm_pcie_dev_tbl[i].domain = -1;
6661 msm_pcie_dev_tbl[i].conf_base = 0;
6662 msm_pcie_dev_tbl[i].phy_address = 0;
6663 msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
6664 msm_pcie_dev_tbl[i].event_reg = NULL;
6665 msm_pcie_dev_tbl[i].registered = true;
6666 }
6667
6668 msm_pcie_debugfs_init();
6669
6670 ret = platform_driver_register(&msm_pcie_driver);
6671
6672 return ret;
6673}
6674
6675static void __exit pcie_exit(void)
6676{
Tony Truongbd9a3412017-02-27 18:30:13 -08006677 int i;
6678
Tony Truong349ee492014-10-01 17:35:56 -07006679 PCIE_GEN_DBG("pcie:%s.\n", __func__);
6680
6681 platform_driver_unregister(&msm_pcie_driver);
6682
6683 msm_pcie_debugfs_exit();
Tony Truongbd9a3412017-02-27 18:30:13 -08006684
6685 for (i = 0; i < MAX_RC_NUM; i++)
6686 msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
Tony Truong349ee492014-10-01 17:35:56 -07006687}
6688
/* Initialize late in subsystem bring-up so dependent subsystems exist. */
subsys_initcall_sync(pcie_init);
module_exit(pcie_exit);
6691
6692
6693/* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
6694static void msm_pcie_fixup_early(struct pci_dev *dev)
6695{
6696 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6697
6698 PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
6699 if (dev->hdr_type == 1)
6700 dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
6701}
6702DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6703 msm_pcie_fixup_early);
6704
/*
 * msm_pcie_pm_suspend() - suspend the PCIe link for one root complex.
 * @dev:     root-complex pci_dev (state is saved through it).
 * @user:    unused here; kept for the msm_pcie_pm_control() signature.
 * @data:    unused here; kept for the msm_pcie_pm_control() signature.
 * @options: MSM_PCIE_CONFIG_* flags; NO_CFG_RESTORE skips state saving.
 *
 * Sequence: mark the RC as suspending (under aer_lock so the AER path
 * sees a consistent flag), optionally save PCI config state, block
 * further config access, request PME_Turn_Off via the ELBI register,
 * poll PARF_PM_STTS for L23_Ready, then power down clocks/regulators
 * and move pinctrl to the sleep state.
 *
 * Return: 0 on success or the pci_save_state() error.  A failed L23
 * handshake is only logged; the power-down still proceeds.
 */
static int msm_pcie_pm_suspend(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret = 0;
	u32 val = 0;
	int ret_l23;
	unsigned long irqsave_flags;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
	pcie_dev->suspending = true;
	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);

	/* Nothing to do if the RC is already powered off. */
	if (!pcie_dev->power_on) {
		PCIE_DBG(pcie_dev,
			"PCIe: power of RC%d has been turned off.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	/* Save config space only when the link is confirmed up. */
	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
			pcie_dev->conf)) {
		ret = pci_save_state(dev);
		pcie_dev->saved_state =	pci_store_saved_state(dev);
	}
	if (ret) {
		PCIE_ERR(pcie_dev, "PCIe: fail to save state of RC%d:%d.\n",
			pcie_dev->rc_idx, ret);
		pcie_dev->suspending = false;
		return ret;
	}

	/* Block config-space access for the duration of the suspend. */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = false;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* BIT(4) in ELBI_SYS_CTRL triggers the PME_Turn_Off message. */
	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
				BIT(4));

	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
		pcie_dev->rc_idx);

	/* Poll up to 100ms (10us step) for L23_Ready (BIT(5)). */
	ret_l23 = readl_poll_timeout((pcie_dev->parf
		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);

	/* check L23_Ready */
	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
		pcie_dev->rc_idx,
		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
	if (!ret_l23)
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
			pcie_dev->rc_idx);

	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_sleep);

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6777
6778static void msm_pcie_fixup_suspend(struct pci_dev *dev)
6779{
6780 int ret;
6781 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6782
6783 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6784
6785 if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
6786 return;
6787
6788 spin_lock_irqsave(&pcie_dev->cfg_lock,
6789 pcie_dev->irqsave_flags);
6790 if (pcie_dev->disable_pc) {
6791 PCIE_DBG(pcie_dev,
6792 "RC%d: Skip suspend because of user request\n",
6793 pcie_dev->rc_idx);
6794 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
6795 pcie_dev->irqsave_flags);
6796 return;
6797 }
6798 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
6799 pcie_dev->irqsave_flags);
6800
6801 mutex_lock(&pcie_dev->recovery_lock);
6802
6803 ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
6804 if (ret)
6805 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
6806 pcie_dev->rc_idx, ret);
6807
6808 mutex_unlock(&pcie_dev->recovery_lock);
6809}
6810DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6811 msm_pcie_fixup_suspend);
6812
/*
 * msm_pcie_pm_resume() - resume the PCIe link for one root complex.
 * @dev:     root-complex pci_dev whose state is restored.
 * @user:    unused here; kept for the msm_pcie_pm_control() signature.
 * @data:    unused here; kept for the msm_pcie_pm_control() signature.
 * @options: MSM_PCIE_CONFIG_* flags; NO_CFG_RESTORE skips config restore.
 *
 * Sequence: switch pinctrl back to the default state, re-allow config
 * access, power the link back up, restore the saved PCI config space
 * (unless suppressed) and, when a bridge was found at enumeration,
 * re-run the config recovery path.
 *
 * Return: 0 on success or the msm_pcie_enable() error.
 */
static int msm_pcie_pm_resume(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_default);

	/* Re-enable config-space access before touching the hardware. */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = true;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
	if (ret) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d fail to enable PCIe link in resume.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	pcie_dev->suspending = false;
	PCIE_DBG(pcie_dev,
		"dev->bus->number = %d dev->bus->primary = %d\n",
		 dev->bus->number, dev->bus->primary);

	/* Restore the state captured by msm_pcie_pm_suspend(). */
	if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCI framework restore state\n",
			pcie_dev->rc_idx);

		pci_load_and_free_saved_state(dev,
			&pcie_dev->saved_state);
		pci_restore_state(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCI framework restore state\n",
			pcie_dev->rc_idx);
	}

	if (pcie_dev->bridge_found) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCIe recover config\n",
			pcie_dev->rc_idx);

		msm_pcie_recover_config(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCIe recover config\n",
			pcie_dev->rc_idx);
	}

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6875
6876void msm_pcie_fixup_resume(struct pci_dev *dev)
6877{
6878 int ret;
6879 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6880
6881 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6882
6883 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6884 pcie_dev->user_suspend)
6885 return;
6886
6887 mutex_lock(&pcie_dev->recovery_lock);
6888 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6889 if (ret)
6890 PCIE_ERR(pcie_dev,
6891 "PCIe: RC%d got failure in fixup resume:%d.\n",
6892 pcie_dev->rc_idx, ret);
6893
6894 mutex_unlock(&pcie_dev->recovery_lock);
6895}
6896DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6897 msm_pcie_fixup_resume);
6898
6899void msm_pcie_fixup_resume_early(struct pci_dev *dev)
6900{
6901 int ret;
6902 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6903
6904 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6905
6906 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6907 pcie_dev->user_suspend)
6908 return;
6909
6910 mutex_lock(&pcie_dev->recovery_lock);
6911 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6912 if (ret)
6913 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
6914 pcie_dev->rc_idx, ret);
6915
6916 mutex_unlock(&pcie_dev->recovery_lock);
6917}
6918DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6919 msm_pcie_fixup_resume_early);
6920
/*
 * msm_pcie_pm_control() - client-facing power-management entry point.
 * @pm_opt:  requested operation (suspend/resume/disable-PC/enable-PC).
 * @busnr:   expected bus number of the endpoint, or 0 to skip the check.
 * @user:    endpoint pci_dev that identifies the RC (must not be NULL).
 * @data:    opaque cookie passed through to the suspend/resume helpers.
 * @options: MSM_PCIE_CONFIG_* flags forwarded to suspend/resume.
 *
 * Return: 0 on success, -ENODEV/-EPROBE_DEFER or MSM_PCIE_ERROR on
 * failure.
 */
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
			void *data, u32 options)
{
	int i, ret = 0;
	struct pci_dev *dev;
	u32 rc_idx = 0;
	struct msm_pcie_dev_t *pcie_dev;

	PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
		pm_opt, busnr, options);


	if (!user) {
		pr_err("PCIe: endpoint device is NULL\n");
		ret = -ENODEV;
		goto out;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);

	if (pcie_dev) {
		rc_idx = pcie_dev->rc_idx;
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
			rc_idx, pm_opt, busnr, options);
	} else {
		pr_err(
			"PCIe: did not find RC for pci endpoint device.\n"
			);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * When busnr is non-zero, verify the caller's endpoint really sits
	 * on that bus; busnr == 0 skips the device-table cross-check.
	 */
	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (!busnr)
			break;
		if (user == pcie_dev->pcidev_table[i].dev) {
			if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
				break;

			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: bus number %d does not match with the expected value %d\n",
				pcie_dev->rc_idx, busnr,
				pcie_dev->pcidev_table[i].bdf >> 24);
			ret = MSM_PCIE_ERROR;
			goto out;
		}
	}

	/* Loop ran to completion: the endpoint is unknown to this RC. */
	if (i == MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: endpoint device was not found in device table",
			pcie_dev->rc_idx);
		ret = MSM_PCIE_ERROR;
		goto out;
	}

	dev = msm_pcie_dev[rc_idx].dev;

	if (!msm_pcie_dev[rc_idx].drv_ready) {
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"RC%d has not been successfully probed yet\n",
			rc_idx);
		return -EPROBE_DEFER;
	}

	switch (pm_opt) {
	case MSM_PCIE_SUSPEND:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to suspend the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);

		if (!msm_pcie_dev[rc_idx].power_on) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);
			break;
		}

		if (msm_pcie_dev[rc_idx].pending_ep_reg) {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: request to suspend the link is rejected\n",
				rc_idx);
			break;
		}

		/* Only the last active endpoint may take the link down. */
		if (pcie_dev->num_active_ep) {
			PCIE_DBG(pcie_dev,
				"RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
				pcie_dev->rc_idx, pcie_dev->num_active_ep);
			return ret;
		}

		msm_pcie_dev[rc_idx].user_suspend = true;

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);

		ret = msm_pcie_pm_suspend(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to suspend the link.\n",
				rc_idx);
			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
		break;
	case MSM_PCIE_RESUME:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to resume the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status !=
					MSM_PCIE_LINK_DISABLED) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status,
				msm_pcie_dev[rc_idx].num_active_ep);
			break;
		}

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
		ret = msm_pcie_pm_resume(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to resume the link.\n",
				rc_idx);
		} else {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user succeeded to resume the link.\n",
				rc_idx);

			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);

		break;
	case MSM_PCIE_DISABLE_PC:
		/* Pin the link up; rejected if a suspend already started. */
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to keep the link always alive.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		if (msm_pcie_dev[rc_idx].suspending) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d Link has been suspended before request\n",
				rc_idx);
			ret = MSM_PCIE_ERROR;
		} else {
			msm_pcie_dev[rc_idx].disable_pc = true;
		}
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	case MSM_PCIE_ENABLE_PC:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d cancels the request of alive link.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		msm_pcie_dev[rc_idx].disable_pc = false;
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	default:
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"PCIe: RC%d: unsupported pm operation:%d.\n",
			rc_idx, pm_opt);
		ret = -ENODEV;
		goto out;
	}

out:
	return ret;
}
EXPORT_SYMBOL(msm_pcie_pm_control);
7099
/*
 * msm_pcie_register_event() - register a client event callback with an RC.
 * @reg: registration descriptor; reg->user is the endpoint pci_dev that
 *       identifies the RC, reg->events the event mask.
 *
 * On a multi-endpoint RC (num_ep > 1) the registration is also recorded
 * in the per-endpoint table and the active-endpoint count is bumped for
 * a first-time registration; once every table entry is registered the
 * pending_ep_reg flag is cleared.  Single-endpoint RCs just store the
 * descriptor in dev->event_reg.
 *
 * Return: 0 on success, -ENODEV on invalid arguments or unknown RC.
 */
int msm_pcie_register_event(struct msm_pcie_register_event *reg)
{
	int i, ret = 0;
	struct msm_pcie_dev_t *pcie_dev;

	if (!reg) {
		pr_err("PCIe: Event registration is NULL\n");
		return -ENODEV;
	}

	if (!reg->user) {
		pr_err("PCIe: User of event registration is NULL\n");
		return -ENODEV;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);

	if (!pcie_dev) {
		PCIE_ERR(pcie_dev, "%s",
			"PCIe: did not find RC for pci endpoint device.\n");
		return -ENODEV;
	}

	if (pcie_dev->num_ep > 1) {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (reg->user ==
				pcie_dev->pcidev_table[i].dev) {
				pcie_dev->event_reg =
					pcie_dev->pcidev_table[i].event_reg;

				/*
				 * First registration for this endpoint:
				 * mark it and count it as active.
				 */
				if (!pcie_dev->event_reg) {
					pcie_dev->pcidev_table[i].registered =
						true;

					pcie_dev->num_active_ep++;
					PCIE_DBG(pcie_dev,
						"PCIe: RC%d: number of active EP(s): %d.\n",
						pcie_dev->rc_idx,
						pcie_dev->num_active_ep);
				}

				pcie_dev->event_reg = reg;
				pcie_dev->pcidev_table[i].event_reg = reg;
				PCIE_DBG(pcie_dev,
					"Event 0x%x is registered for RC %d\n",
					reg->events,
					pcie_dev->rc_idx);

				break;
			}
		}

		/* Clear pending_ep_reg once every endpoint has registered. */
		if (pcie_dev->pending_ep_reg) {
			for (i = 0; i < MAX_DEVICE_NUM; i++)
				if (!pcie_dev->pcidev_table[i].registered)
					break;

			if (i == MAX_DEVICE_NUM)
				pcie_dev->pending_ep_reg = false;
		}
	} else {
		pcie_dev->event_reg = reg;
		PCIE_DBG(pcie_dev,
			"Event 0x%x is registered for RC %d\n", reg->events,
			pcie_dev->rc_idx);
	}

	return ret;
}
EXPORT_SYMBOL(msm_pcie_register_event);
7170
7171int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
7172{
7173 int i, ret = 0;
7174 struct msm_pcie_dev_t *pcie_dev;
7175
7176 if (!reg) {
7177 pr_err("PCIe: Event deregistration is NULL\n");
7178 return -ENODEV;
7179 }
7180
7181 if (!reg->user) {
7182 pr_err("PCIe: User of event deregistration is NULL\n");
7183 return -ENODEV;
7184 }
7185
7186 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
7187
7188 if (!pcie_dev) {
7189 PCIE_ERR(pcie_dev, "%s",
7190 "PCIe: did not find RC for pci endpoint device.\n");
7191 return -ENODEV;
7192 }
7193
7194 if (pcie_dev->num_ep > 1) {
7195 for (i = 0; i < MAX_DEVICE_NUM; i++) {
7196 if (reg->user == pcie_dev->pcidev_table[i].dev) {
7197 if (pcie_dev->pcidev_table[i].event_reg) {
7198 pcie_dev->num_active_ep--;
7199 PCIE_DBG(pcie_dev,
7200 "PCIe: RC%d: number of active EP(s) left: %d.\n",
7201 pcie_dev->rc_idx,
7202 pcie_dev->num_active_ep);
7203 }
7204
7205 pcie_dev->event_reg = NULL;
7206 pcie_dev->pcidev_table[i].event_reg = NULL;
7207 PCIE_DBG(pcie_dev,
7208 "Event is deregistered for RC %d\n",
7209 pcie_dev->rc_idx);
7210
7211 break;
7212 }
7213 }
7214 } else {
7215 pcie_dev->event_reg = NULL;
7216 PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
7217 pcie_dev->rc_idx);
7218 }
7219
7220 return ret;
7221}
7222EXPORT_SYMBOL(msm_pcie_deregister_event);
7223
7224int msm_pcie_recover_config(struct pci_dev *dev)
7225{
7226 int ret = 0;
7227 struct msm_pcie_dev_t *pcie_dev;
7228
7229 if (dev) {
7230 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
7231 PCIE_DBG(pcie_dev,
7232 "Recovery for the link of RC%d\n", pcie_dev->rc_idx);
7233 } else {
7234 pr_err("PCIe: the input pci dev is NULL.\n");
7235 return -ENODEV;
7236 }
7237
7238 if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
7239 PCIE_DBG(pcie_dev,
7240 "Recover config space of RC%d and its EP\n",
7241 pcie_dev->rc_idx);
7242 pcie_dev->shadow_en = false;
7243 PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
7244 msm_pcie_cfg_recover(pcie_dev, true);
7245 PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
7246 msm_pcie_cfg_recover(pcie_dev, false);
7247 PCIE_DBG(pcie_dev,
7248 "Refreshing the saved config space in PCI framework for RC%d and its EP\n",
7249 pcie_dev->rc_idx);
7250 pci_save_state(pcie_dev->dev);
7251 pci_save_state(dev);
7252 pcie_dev->shadow_en = true;
7253 PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
7254 pcie_dev->rc_idx);
7255 } else {
7256 PCIE_ERR(pcie_dev,
7257 "PCIe: the link of RC%d is not up yet; can't recover config space.\n",
7258 pcie_dev->rc_idx);
7259 ret = -ENODEV;
7260 }
7261
7262 return ret;
7263}
7264EXPORT_SYMBOL(msm_pcie_recover_config);
7265
7266int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
7267{
7268 int ret = 0;
7269 struct msm_pcie_dev_t *pcie_dev;
7270
7271 if (dev) {
7272 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
7273 PCIE_DBG(pcie_dev,
7274 "User requests to %s shadow\n",
7275 enable ? "enable" : "disable");
7276 } else {
7277 pr_err("PCIe: the input pci dev is NULL.\n");
7278 return -ENODEV;
7279 }
7280
7281 PCIE_DBG(pcie_dev,
7282 "The shadowing of RC%d is %s enabled currently.\n",
7283 pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not");
7284
7285 pcie_dev->shadow_en = enable;
7286
7287 PCIE_DBG(pcie_dev,
7288 "Shadowing of RC%d is turned %s upon user's request.\n",
7289 pcie_dev->rc_idx, enable ? "on" : "off");
7290
7291 return ret;
7292}
7293EXPORT_SYMBOL(msm_pcie_shadow_control);