blob: 49a4466f9aea6fbc69a6120cc766e14f454ca1c7 [file] [log] [blame]
Tony Truong349ee492014-10-01 17:35:56 -07001/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * MSM PCIe controller driver.
15 */
16
17#include <linux/module.h>
18#include <linux/bitops.h>
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
22#include <linux/gpio.h>
23#include <linux/iopoll.h>
24#include <linux/kernel.h>
25#include <linux/of_pci.h>
26#include <linux/pci.h>
Tony Truong52122a62017-03-23 18:00:34 -070027#include <linux/iommu.h>
Tony Truong349ee492014-10-01 17:35:56 -070028#include <linux/platform_device.h>
29#include <linux/regulator/consumer.h>
30#include <linux/regulator/rpm-smd-regulator.h>
31#include <linux/slab.h>
32#include <linux/types.h>
33#include <linux/of_gpio.h>
34#include <linux/clk/msm-clk.h>
35#include <linux/reset.h>
36#include <linux/msm-bus.h>
37#include <linux/msm-bus-board.h>
38#include <linux/debugfs.h>
39#include <linux/uaccess.h>
40#include <linux/io.h>
41#include <linux/msi.h>
42#include <linux/interrupt.h>
43#include <linux/irq.h>
44#include <linux/irqdomain.h>
45#include <linux/pm_wakeup.h>
46#include <linux/compiler.h>
47#include <soc/qcom/scm.h>
48#include <linux/ipc_logging.h>
49#include <linux/msm_pcie.h>
50
51#ifdef CONFIG_ARCH_MDMCALIFORNIUM
52#define PCIE_VENDOR_ID_RCP 0x17cb
53#define PCIE_DEVICE_ID_RCP 0x0302
54
55#define PCIE20_L1SUB_CONTROL1 0x158
56#define PCIE20_PARF_DBI_BASE_ADDR 0x350
57#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
58
59#define TX_BASE 0x200
60#define RX_BASE 0x400
61#define PCS_BASE 0x800
62#define PCS_MISC_BASE 0x600
63
64#elif defined(CONFIG_ARCH_MSM8998)
65#define PCIE_VENDOR_ID_RCP 0x17cb
66#define PCIE_DEVICE_ID_RCP 0x0105
67
68#define PCIE20_L1SUB_CONTROL1 0x1E4
69#define PCIE20_PARF_DBI_BASE_ADDR 0x350
70#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
71
72#define TX_BASE 0
73#define RX_BASE 0
74#define PCS_BASE 0x800
75#define PCS_MISC_BASE 0
76
77#else
78#define PCIE_VENDOR_ID_RCP 0x17cb
79#define PCIE_DEVICE_ID_RCP 0x0104
80
81#define PCIE20_L1SUB_CONTROL1 0x158
82#define PCIE20_PARF_DBI_BASE_ADDR 0x168
83#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
84
85#define TX_BASE 0x1000
86#define RX_BASE 0x1200
87#define PCS_BASE 0x1400
88#define PCS_MISC_BASE 0
89#endif
90
91#define TX(n, m) (TX_BASE + n * m * 0x1000)
92#define RX(n, m) (RX_BASE + n * m * 0x1000)
93#define PCS_PORT(n, m) (PCS_BASE + n * m * 0x1000)
94#define PCS_MISC_PORT(n, m) (PCS_MISC_BASE + n * m * 0x1000)
95
96#define QSERDES_COM_BG_TIMER 0x00C
97#define QSERDES_COM_SSC_EN_CENTER 0x010
98#define QSERDES_COM_SSC_ADJ_PER1 0x014
99#define QSERDES_COM_SSC_ADJ_PER2 0x018
100#define QSERDES_COM_SSC_PER1 0x01C
101#define QSERDES_COM_SSC_PER2 0x020
102#define QSERDES_COM_SSC_STEP_SIZE1 0x024
103#define QSERDES_COM_SSC_STEP_SIZE2 0x028
104#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
105#define QSERDES_COM_CLK_ENABLE1 0x038
106#define QSERDES_COM_SYS_CLK_CTRL 0x03C
107#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
108#define QSERDES_COM_PLL_IVCO 0x048
109#define QSERDES_COM_LOCK_CMP1_MODE0 0x04C
110#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
111#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
112#define QSERDES_COM_BG_TRIM 0x070
113#define QSERDES_COM_CLK_EP_DIV 0x074
114#define QSERDES_COM_CP_CTRL_MODE0 0x078
115#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
116#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
117#define QSERDES_COM_SYSCLK_EN_SEL 0x0AC
118#define QSERDES_COM_RESETSM_CNTRL 0x0B4
119#define QSERDES_COM_RESTRIM_CTRL 0x0BC
120#define QSERDES_COM_RESCODE_DIV_NUM 0x0C4
121#define QSERDES_COM_LOCK_CMP_EN 0x0C8
122#define QSERDES_COM_DEC_START_MODE0 0x0D0
123#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0DC
124#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0E0
125#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0E4
126#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
127#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10C
128#define QSERDES_COM_VCO_TUNE_CTRL 0x124
129#define QSERDES_COM_VCO_TUNE_MAP 0x128
130#define QSERDES_COM_VCO_TUNE1_MODE0 0x12C
131#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
132#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
133#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
134#define QSERDES_COM_BG_CTRL 0x170
135#define QSERDES_COM_CLK_SELECT 0x174
136#define QSERDES_COM_HSCLK_SEL 0x178
137#define QSERDES_COM_CORECLK_DIV 0x184
138#define QSERDES_COM_CORE_CLK_EN 0x18C
139#define QSERDES_COM_C_READY_STATUS 0x190
140#define QSERDES_COM_CMN_CONFIG 0x194
141#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19C
142#define QSERDES_COM_DEBUG_BUS0 0x1A0
143#define QSERDES_COM_DEBUG_BUS1 0x1A4
144#define QSERDES_COM_DEBUG_BUS2 0x1A8
145#define QSERDES_COM_DEBUG_BUS3 0x1AC
146#define QSERDES_COM_DEBUG_BUS_SEL 0x1B0
147
148#define QSERDES_TX_N_RES_CODE_LANE_OFFSET(n, m) (TX(n, m) + 0x4C)
149#define QSERDES_TX_N_DEBUG_BUS_SEL(n, m) (TX(n, m) + 0x64)
150#define QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(n, m) (TX(n, m) + 0x68)
151#define QSERDES_TX_N_LANE_MODE(n, m) (TX(n, m) + 0x94)
152#define QSERDES_TX_N_RCV_DETECT_LVL_2(n, m) (TX(n, m) + 0xAC)
153
154#define QSERDES_RX_N_UCDR_SO_GAIN_HALF(n, m) (RX(n, m) + 0x010)
155#define QSERDES_RX_N_UCDR_SO_GAIN(n, m) (RX(n, m) + 0x01C)
156#define QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(n, m) (RX(n, m) + 0x048)
157#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(n, m) (RX(n, m) + 0x0D8)
158#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(n, m) (RX(n, m) + 0x0DC)
159#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(n, m) (RX(n, m) + 0x0E0)
160#define QSERDES_RX_N_SIGDET_ENABLES(n, m) (RX(n, m) + 0x110)
161#define QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(n, m) (RX(n, m) + 0x11C)
162#define QSERDES_RX_N_SIGDET_LVL(n, m) (RX(n, m) + 0x118)
163#define QSERDES_RX_N_RX_BAND(n, m) (RX(n, m) + 0x120)
164
165#define PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x00)
166#define PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x04)
167#define PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x08)
168#define PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x0C)
169#define PCIE_MISC_N_DEBUG_BUS_0_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x14)
170#define PCIE_MISC_N_DEBUG_BUS_1_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x18)
171#define PCIE_MISC_N_DEBUG_BUS_2_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x1C)
172#define PCIE_MISC_N_DEBUG_BUS_3_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x20)
173
174#define PCIE_N_SW_RESET(n, m) (PCS_PORT(n, m) + 0x00)
175#define PCIE_N_POWER_DOWN_CONTROL(n, m) (PCS_PORT(n, m) + 0x04)
176#define PCIE_N_START_CONTROL(n, m) (PCS_PORT(n, m) + 0x08)
177#define PCIE_N_TXDEEMPH_M6DB_V0(n, m) (PCS_PORT(n, m) + 0x24)
178#define PCIE_N_TXDEEMPH_M3P5DB_V0(n, m) (PCS_PORT(n, m) + 0x28)
179#define PCIE_N_ENDPOINT_REFCLK_DRIVE(n, m) (PCS_PORT(n, m) + 0x54)
180#define PCIE_N_RX_IDLE_DTCT_CNTRL(n, m) (PCS_PORT(n, m) + 0x58)
181#define PCIE_N_POWER_STATE_CONFIG1(n, m) (PCS_PORT(n, m) + 0x60)
182#define PCIE_N_POWER_STATE_CONFIG4(n, m) (PCS_PORT(n, m) + 0x6C)
183#define PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA0)
184#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA4)
185#define PCIE_N_PLL_LOCK_CHK_DLY_TIME(n, m) (PCS_PORT(n, m) + 0xA8)
186#define PCIE_N_TEST_CONTROL4(n, m) (PCS_PORT(n, m) + 0x11C)
187#define PCIE_N_TEST_CONTROL5(n, m) (PCS_PORT(n, m) + 0x120)
188#define PCIE_N_TEST_CONTROL6(n, m) (PCS_PORT(n, m) + 0x124)
189#define PCIE_N_TEST_CONTROL7(n, m) (PCS_PORT(n, m) + 0x128)
190#define PCIE_N_PCS_STATUS(n, m) (PCS_PORT(n, m) + 0x174)
191#define PCIE_N_DEBUG_BUS_0_STATUS(n, m) (PCS_PORT(n, m) + 0x198)
192#define PCIE_N_DEBUG_BUS_1_STATUS(n, m) (PCS_PORT(n, m) + 0x19C)
193#define PCIE_N_DEBUG_BUS_2_STATUS(n, m) (PCS_PORT(n, m) + 0x1A0)
194#define PCIE_N_DEBUG_BUS_3_STATUS(n, m) (PCS_PORT(n, m) + 0x1A4)
195#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1A8)
196#define PCIE_N_OSC_DTCT_ACTIONS(n, m) (PCS_PORT(n, m) + 0x1AC)
197#define PCIE_N_SIGDET_CNTRL(n, m) (PCS_PORT(n, m) + 0x1B0)
198#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(n, m) (PCS_PORT(n, m) + 0x1DC)
199#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1E0)
200
201#define PCIE_COM_SW_RESET 0x400
202#define PCIE_COM_POWER_DOWN_CONTROL 0x404
203#define PCIE_COM_START_CONTROL 0x408
204#define PCIE_COM_DEBUG_BUS_BYTE0_INDEX 0x438
205#define PCIE_COM_DEBUG_BUS_BYTE1_INDEX 0x43C
206#define PCIE_COM_DEBUG_BUS_BYTE2_INDEX 0x440
207#define PCIE_COM_DEBUG_BUS_BYTE3_INDEX 0x444
208#define PCIE_COM_PCS_READY_STATUS 0x448
209#define PCIE_COM_DEBUG_BUS_0_STATUS 0x45C
210#define PCIE_COM_DEBUG_BUS_1_STATUS 0x460
211#define PCIE_COM_DEBUG_BUS_2_STATUS 0x464
212#define PCIE_COM_DEBUG_BUS_3_STATUS 0x468
213
214#define PCIE20_PARF_SYS_CTRL 0x00
215#define PCIE20_PARF_PM_STTS 0x24
216#define PCIE20_PARF_PCS_DEEMPH 0x34
217#define PCIE20_PARF_PCS_SWING 0x38
218#define PCIE20_PARF_PHY_CTRL 0x40
219#define PCIE20_PARF_PHY_REFCLK 0x4C
220#define PCIE20_PARF_CONFIG_BITS 0x50
221#define PCIE20_PARF_TEST_BUS 0xE4
222#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
223#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8
224#define PCIE20_PARF_LTSSM 0x1B0
225#define PCIE20_PARF_INT_ALL_STATUS 0x224
226#define PCIE20_PARF_INT_ALL_CLEAR 0x228
227#define PCIE20_PARF_INT_ALL_MASK 0x22C
228#define PCIE20_PARF_SID_OFFSET 0x234
229#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
230#define PCIE20_PARF_BDF_TRANSLATE_N 0x250
231
232#define PCIE20_ELBI_VERSION 0x00
233#define PCIE20_ELBI_SYS_CTRL 0x04
234#define PCIE20_ELBI_SYS_STTS 0x08
235
236#define PCIE20_CAP 0x70
237#define PCIE20_CAP_DEVCTRLSTATUS (PCIE20_CAP + 0x08)
238#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
239
240#define PCIE20_COMMAND_STATUS 0x04
241#define PCIE20_HEADER_TYPE 0x0C
242#define PCIE20_BUSNUMBERS 0x18
243#define PCIE20_MEMORY_BASE_LIMIT 0x20
244#define PCIE20_BRIDGE_CTRL 0x3C
245#define PCIE20_DEVICE_CONTROL_STATUS 0x78
246#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
247
248#define PCIE20_AUX_CLK_FREQ_REG 0xB40
249#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C
250#define PCIE20_ACK_N_FTS 0xff00
251
252#define PCIE20_PLR_IATU_VIEWPORT 0x900
253#define PCIE20_PLR_IATU_CTRL1 0x904
254#define PCIE20_PLR_IATU_CTRL2 0x908
255#define PCIE20_PLR_IATU_LBAR 0x90C
256#define PCIE20_PLR_IATU_UBAR 0x910
257#define PCIE20_PLR_IATU_LAR 0x914
258#define PCIE20_PLR_IATU_LTAR 0x918
259#define PCIE20_PLR_IATU_UTAR 0x91c
260
261#define PCIE20_CTRL1_TYPE_CFG0 0x04
262#define PCIE20_CTRL1_TYPE_CFG1 0x05
263
264#define PCIE20_CAP_ID 0x10
265#define L1SUB_CAP_ID 0x1E
266
267#define PCIE_CAP_PTR_OFFSET 0x34
268#define PCIE_EXT_CAP_OFFSET 0x100
269
270#define PCIE20_AER_UNCORR_ERR_STATUS_REG 0x104
271#define PCIE20_AER_CORR_ERR_STATUS_REG 0x110
272#define PCIE20_AER_ROOT_ERR_STATUS_REG 0x130
273#define PCIE20_AER_ERR_SRC_ID_REG 0x134
274
275#define RD 0
276#define WR 1
277#define MSM_PCIE_ERROR -1
278
279#define PERST_PROPAGATION_DELAY_US_MIN 1000
280#define PERST_PROPAGATION_DELAY_US_MAX 1005
281#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
282#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
283#define LINK_UP_TIMEOUT_US_MIN 5000
284#define LINK_UP_TIMEOUT_US_MAX 5100
285#define LINK_UP_CHECK_MAX_COUNT 20
286#define PHY_STABILIZATION_DELAY_US_MIN 995
287#define PHY_STABILIZATION_DELAY_US_MAX 1005
288#define POWER_DOWN_DELAY_US_MIN 10
289#define POWER_DOWN_DELAY_US_MAX 11
290#define LINKDOWN_INIT_WAITING_US_MIN 995
291#define LINKDOWN_INIT_WAITING_US_MAX 1005
292#define LINKDOWN_WAITING_US_MIN 4900
293#define LINKDOWN_WAITING_US_MAX 5100
294#define LINKDOWN_WAITING_COUNT 200
295
296#define PHY_READY_TIMEOUT_COUNT 10
297#define XMLH_LINK_UP 0x400
298#define MAX_LINK_RETRIES 5
299#define MAX_BUS_NUM 3
300#define MAX_PROP_SIZE 32
301#define MAX_RC_NAME_LEN 15
302#define MSM_PCIE_MAX_VREG 4
303#define MSM_PCIE_MAX_CLK 9
304#define MSM_PCIE_MAX_PIPE_CLK 1
305#define MAX_RC_NUM 3
306#define MAX_DEVICE_NUM 20
307#define MAX_SHORT_BDF_NUM 16
308#define PCIE_TLP_RD_SIZE 0x5
309#define PCIE_MSI_NR_IRQS 256
310#define MSM_PCIE_MAX_MSI 32
311#define MAX_MSG_LEN 80
312#define PCIE_LOG_PAGES (50)
313#define PCIE_CONF_SPACE_DW 1024
314#define PCIE_CLEAR 0xDEADBEEF
315#define PCIE_LINK_DOWN 0xFFFFFFFF
316
317#define MSM_PCIE_MAX_RESET 4
318#define MSM_PCIE_MAX_PIPE_RESET 1
319
320#define MSM_PCIE_MSI_PHY 0xa0000000
321#define PCIE20_MSI_CTRL_ADDR (0x820)
322#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
323#define PCIE20_MSI_CTRL_INTR_EN (0x828)
324#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
325#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
326#define PCIE20_MSI_CTRL_MAX 8
327
328/* PM control options */
329#define PM_IRQ 0x1
330#define PM_CLK 0x2
331#define PM_GPIO 0x4
332#define PM_VREG 0x8
333#define PM_PIPE_CLK 0x10
334#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)
335
336#ifdef CONFIG_PHYS_ADDR_T_64BIT
337#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
338#else
339#define PCIE_UPPER_ADDR(addr) (0x0)
340#endif
341#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
342
/* Config Space Offsets */
/* Pack bus/devfn into the iATU target-address BDF field (bus[31:24], devfn[23:16]). */
#define BDF_OFFSET(bus, devfn) \
	((bus << 24) | (devfn << 16))

/* Raw pass-through debug print, gated only by the debug_mask module param. */
#define PCIE_GEN_DBG(x...) do { \
	if (msm_pcie_debug_mask) \
		pr_alert(x); \
	} while (0)

/*
 * Logging macros below fan out to the per-controller IPC log buffers
 * (ipc_log / ipc_log_long / ipc_log_dump) and optionally the kernel log.
 * The fmt argument must be a string literal: it is concatenated with a
 * prefix at compile time.  (dev) may be NULL early in probe.
 */

/* Verbose debug: long + short IPC logs; console only if debug_mask is set. */
#define PCIE_DBG(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_long) \
		ipc_log_string((dev)->ipc_log_long, \
			"DBG1:%s: " fmt, __func__, arg); \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
	if (msm_pcie_debug_mask) \
		pr_alert("%s: " fmt, __func__, arg); \
	} while (0)

/* Level-2 debug: short IPC log only; console only if debug_mask is set. */
#define PCIE_DBG2(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
	if (msm_pcie_debug_mask) \
		pr_alert("%s: " fmt, __func__, arg); \
	} while (0)

/* Level-3 debug: short IPC log only; console only if debug_mask is set. */
#define PCIE_DBG3(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
	if (msm_pcie_debug_mask) \
		pr_alert("%s: " fmt, __func__, arg); \
	} while (0)

/* Register-dump output: goes only to the dedicated dump IPC log. */
#define PCIE_DUMP(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_dump) \
		ipc_log_string((dev)->ipc_log_dump, \
			"DUMP:%s: " fmt, __func__, arg); \
	} while (0)

/* debugfs-path messages: dump IPC log plus unconditional console print. */
#define PCIE_DBG_FS(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_dump) \
		ipc_log_string((dev)->ipc_log_dump, \
			"DBG_FS:%s: " fmt, __func__, arg); \
	pr_alert("%s: " fmt, __func__, arg); \
	} while (0)

/* Informational: both IPC logs plus unconditional pr_info. */
#define PCIE_INFO(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_long) \
		ipc_log_string((dev)->ipc_log_long, \
			"INFO:%s: " fmt, __func__, arg); \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
	pr_info("%s: " fmt, __func__, arg); \
	} while (0)

/* Error: both IPC logs plus unconditional pr_err. */
#define PCIE_ERR(dev, fmt, arg...) do { \
	if ((dev) && (dev)->ipc_log_long) \
		ipc_log_string((dev)->ipc_log_long, \
			"ERR:%s: " fmt, __func__, arg); \
	if ((dev) && (dev)->ipc_log) \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
	pr_err("%s: " fmt, __func__, arg); \
	} while (0)
406
407
/*
 * Memory-mapped register/address regions owned by one root complex.
 * Values index both msm_pcie_res_info[] (platform resource names) and
 * msm_pcie_dev_t.res[].
 */
enum msm_pcie_res {
	MSM_PCIE_RES_PARF,	/* "parf" control/status block */
	MSM_PCIE_RES_PHY,	/* "phy" serdes/PCS registers */
	MSM_PCIE_RES_DM_CORE,	/* "dm_core" DesignWare core (RC config space) */
	MSM_PCIE_RES_ELBI,	/* "elbi" external local bus interface */
	MSM_PCIE_RES_CONF,	/* "conf" EP config-access window */
	MSM_PCIE_RES_IO,	/* "io" downstream I/O window */
	MSM_PCIE_RES_BARS,	/* "bars" downstream memory window */
	MSM_PCIE_RES_TCSR,	/* "tcsr" top-level CSR region */
	MSM_PCIE_MAX_RES,	/* number of regions (array bound) */
};
419
/*
 * Platform interrupt lines per root complex; values index both
 * msm_pcie_irq_info[] (resource names) and msm_pcie_dev_t.irq[].
 */
enum msm_pcie_irq {
	MSM_PCIE_INT_MSI,		/* "int_msi" */
	MSM_PCIE_INT_A,			/* legacy INTA */
	MSM_PCIE_INT_B,			/* legacy INTB */
	MSM_PCIE_INT_C,			/* legacy INTC */
	MSM_PCIE_INT_D,			/* legacy INTD */
	MSM_PCIE_INT_PLS_PME,		/* PME from link */
	MSM_PCIE_INT_PME_LEGACY,	/* legacy PME */
	MSM_PCIE_INT_PLS_ERR,		/* link error */
	MSM_PCIE_INT_AER_LEGACY,	/* legacy AER */
	MSM_PCIE_INT_LINK_UP,		/* "int_pls_link_up" */
	MSM_PCIE_INT_LINK_DOWN,		/* "int_pls_link_down" */
	MSM_PCIE_INT_BRIDGE_FLUSH_N,	/* bridge flush */
	MSM_PCIE_INT_GLOBAL_INT,	/* aggregated global interrupt */
	MSM_PCIE_MAX_IRQ,		/* number of entries (array bound) */
};
436
/*
 * Event IDs reported through the aggregated global interrupt
 * (see PCIE20_PARF_INT_ALL_* registers).  Numbering starts at 1.
 *
 * NOTE(review): MSM_PCIE_INT_EVT_MAX (30) is equal to
 * MSM_PCIE_INT_EVT_MSI_7, i.e. it appears to be the highest valid
 * event ID rather than a count — confirm against the ISR loop bounds.
 */
enum msm_pcie_irq_event {
	MSM_PCIE_INT_EVT_LINK_DOWN = 1,
	MSM_PCIE_INT_EVT_BME,		/* bus-master enable set by host */
	MSM_PCIE_INT_EVT_PM_TURNOFF,
	MSM_PCIE_INT_EVT_DEBUG,
	MSM_PCIE_INT_EVT_LTR,
	MSM_PCIE_INT_EVT_MHI_Q6,
	MSM_PCIE_INT_EVT_MHI_A7,
	MSM_PCIE_INT_EVT_DSTATE_CHANGE,
	MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
	MSM_PCIE_INT_EVT_MMIO_WRITE,
	MSM_PCIE_INT_EVT_CFG_WRITE,
	MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
	MSM_PCIE_INT_EVT_LINK_UP,
	MSM_PCIE_INT_EVT_AER_LEGACY,
	MSM_PCIE_INT_EVT_AER_ERR,
	MSM_PCIE_INT_EVT_PME_LEGACY,
	MSM_PCIE_INT_EVT_PLS_PME,
	MSM_PCIE_INT_EVT_INTD,
	MSM_PCIE_INT_EVT_INTC,
	MSM_PCIE_INT_EVT_INTB,
	MSM_PCIE_INT_EVT_INTA,
	MSM_PCIE_INT_EVT_EDMA,
	MSM_PCIE_INT_EVT_MSI_0,
	MSM_PCIE_INT_EVT_MSI_1,
	MSM_PCIE_INT_EVT_MSI_2,
	MSM_PCIE_INT_EVT_MSI_3,
	MSM_PCIE_INT_EVT_MSI_4,
	MSM_PCIE_INT_EVT_MSI_5,
	MSM_PCIE_INT_EVT_MSI_6,
	MSM_PCIE_INT_EVT_MSI_7,
	MSM_PCIE_INT_EVT_MAX = 30,
};
470
/* GPIOs used by the controller; indexes msm_pcie_gpio_info[] / dev->gpio[]. */
enum msm_pcie_gpio {
	MSM_PCIE_GPIO_PERST,	/* endpoint PERST# */
	MSM_PCIE_GPIO_WAKE,	/* WAKE# input from endpoint */
	MSM_PCIE_GPIO_EP,	/* optional endpoint-specific GPIO */
	MSM_PCIE_MAX_GPIO	/* array bound */
};
477
/* Coarse state machine for the PCIe link of one root complex. */
enum msm_pcie_link_status {
	MSM_PCIE_LINK_DEINIT,	/* never brought up (or fully torn down) */
	MSM_PCIE_LINK_ENABLED,	/* link trained and usable */
	MSM_PCIE_LINK_DISABLED	/* link intentionally taken down */
};
483
/* gpio info structure */
struct msm_pcie_gpio_info_t {
	char *name;		/* DT property / label */
	uint32_t num;		/* GPIO number resolved at probe */
	bool out;		/* true: output, false: input */
	uint32_t on;		/* value to drive when "on" */
	uint32_t init;		/* initial value at probe */
	bool required;		/* probe fails if missing */
};
493
/* voltage regulator info structure */
struct msm_pcie_vreg_info_t {
	struct regulator *hdl;	/* regulator handle (NULL until probed) */
	char *name;		/* supply name */
	uint32_t max_v;		/* max voltage in uV (0 = don't set) */
	uint32_t min_v;		/* min voltage in uV (0 = don't set) */
	uint32_t opt_mode;	/* load in uA for optimum mode (0 = skip) */
	bool required;		/* probe fails if missing */
};
503
/* reset info structure */
struct msm_pcie_reset_info_t {
	struct reset_control *hdl;	/* reset handle (NULL until probed) */
	char *name;			/* reset line name */
	bool required;			/* probe fails if missing */
};
510
/* clock info structure */
struct msm_pcie_clk_info_t {
	struct clk *hdl;	/* clock handle (NULL until probed) */
	char *name;		/* clock name */
	u32 freq;		/* rate to set in Hz (0 = leave as-is) */
	bool config_mem;	/* apply NORETAIN mem/periph flags (see
				 * msm_pcie_config_clock_mem)
				 */
	bool required;		/* probe fails if missing */
};
519
/* resource info structure */
struct msm_pcie_res_info_t {
	char *name;			/* platform resource name */
	struct resource *resource;	/* resolved resource */
	void __iomem *base;		/* ioremapped base */
};
526
/* irq info structure */
struct msm_pcie_irq_info_t {
	char *name;	/* platform IRQ resource name */
	uint32_t num;	/* resolved IRQ number */
};
532
/* phy info structure: one register write in a DT-supplied init sequence */
struct msm_pcie_phy_info_t {
	u32 offset;	/* register offset from PHY base */
	u32 val;	/* value to write */
	u32 delay;	/* delay after the write (us, 0 = none) */
};
539
/* PCIe device info structure: per-endpoint bookkeeping under one RC */
struct msm_pcie_device_info {
	u32 bdf;		/* bus/devfn key, see BDF_OFFSET() */
	struct pci_dev *dev;	/* associated pci_dev, NULL until found */
	short short_bdf;	/* compact BDF index (SID mapping) */
	u32 sid;		/* SMMU stream ID */
	int domain;		/* PCI domain (RC index) */
	void __iomem *conf_base;	/* mapped config space for this EP */
	unsigned long phy_address;	/* physical config-space address */
	u32 dev_ctrlstts_offset;	/* offset of Device Control/Status */
	struct msm_pcie_register_event *event_reg;	/* client callbacks */
	bool registered;	/* client registered for events */
};
553
/* msm pcie device structure: all state for one PCIe root complex */
struct msm_pcie_dev_t {
	struct platform_device *pdev;
	struct pci_dev *dev;
	struct regulator *gdsc;
	struct regulator *gdsc_smmu;
	struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
	struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
	struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK];
	struct msm_pcie_clk_info_t pipeclk[MSM_PCIE_MAX_PIPE_CLK];
	struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
	struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
	struct msm_pcie_irq_info_t msi[MSM_PCIE_MAX_MSI];
	struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
	struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];

	/* ioremapped bases, one per msm_pcie_res entry */
	void __iomem *parf;
	void __iomem *phy;
	void __iomem *elbi;
	void __iomem *dm_core;
	void __iomem *conf;
	void __iomem *bars;
	void __iomem *tcsr;

	uint32_t axi_bar_start;
	uint32_t axi_bar_end;

	struct resource *dev_mem_res;
	struct resource *dev_io_res;

	uint32_t wake_n;
	uint32_t vreg_n;
	uint32_t gpio_n;
	uint32_t parf_deemph;
	uint32_t parf_swing;

	/* config-space access gating and probe/setup serialization */
	bool cfg_access;
	spinlock_t cfg_lock;
	unsigned long irqsave_flags;
	struct mutex enumerate_lock;
	struct mutex setup_lock;

	/* MSI handling */
	struct irq_domain *irq_domain;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
	uint32_t msi_gicm_addr;
	uint32_t msi_gicm_base;
	bool use_msi;

	/* link and power state */
	enum msm_pcie_link_status link_status;
	bool user_suspend;
	bool disable_pc;
	struct pci_saved_state *saved_state;

	struct wakeup_source ws;
	struct msm_bus_scale_pdata *bus_scale_table;
	uint32_t bus_client;

	/* feature knobs, mostly populated from DT */
	bool l0s_supported;
	bool l1_supported;
	bool l1ss_supported;
	bool common_clk_en;
	bool clk_power_manage_en;
	bool aux_clk_sync;
	bool aer_enable;
	bool smmu_exist;
	uint32_t smmu_sid_base;
	uint32_t n_fts;
	bool ext_ref_clk;
	bool common_phy;
	uint32_t ep_latency;
	uint32_t wr_halt_size;
	uint32_t cpl_timeout;
	uint32_t current_bdf;
	short current_short_bdf;
	uint32_t perst_delay_us_min;
	uint32_t perst_delay_us_max;
	uint32_t tlp_rd_size;
	bool linkdown_panic;
	bool ep_wakeirq;

	uint32_t rc_idx;	/* index of this RC in msm_pcie_dev[] */
	uint32_t phy_ver;
	bool drv_ready;
	bool enumerated;
	struct work_struct handle_wake_work;
	struct mutex recovery_lock;
	spinlock_t linkdown_lock;
	spinlock_t wakeup_lock;
	spinlock_t global_irq_lock;
	spinlock_t aer_lock;
	/* statistics counters (debugfs/IPC reporting) */
	ulong linkdown_counter;
	ulong link_turned_on_counter;
	ulong link_turned_off_counter;
	ulong rc_corr_counter;
	ulong rc_non_fatal_counter;
	ulong rc_fatal_counter;
	ulong ep_corr_counter;
	ulong ep_non_fatal_counter;
	ulong ep_fatal_counter;
	bool suspending;
	ulong wake_counter;
	u32 num_active_ep;
	u32 num_ep;
	bool pending_ep_reg;
	/* PHY init sequences loaded from DT */
	u32 phy_len;
	u32 port_phy_len;
	struct msm_pcie_phy_info_t *phy_sequence;
	struct msm_pcie_phy_info_t *port_phy_sequence;
	/* shadow copies of config space for restore after link loss */
	u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
	u32 rc_shadow[PCIE_CONF_SPACE_DW];
	bool shadow_en;
	bool bridge_found;
	struct msm_pcie_register_event *event_reg;
	unsigned int scm_dev_id;
	bool power_on;
	/* IPC log buffers used by the PCIE_* logging macros */
	void *ipc_log;
	void *ipc_log_long;
	void *ipc_log_dump;
	bool use_19p2mhz_aux_clk;
	bool use_pinctrl;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_sleep;
	struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM];
};
679
680
681/* debug mask sys interface */
682static int msm_pcie_debug_mask;
683module_param_named(debug_mask, msm_pcie_debug_mask,
684 int, 0644);
685
686/* debugfs values */
687static u32 rc_sel;
688static u32 base_sel;
689static u32 wr_offset;
690static u32 wr_mask;
691static u32 wr_value;
692static ulong corr_counter_limit = 5;
693
694/* counter to keep track if common PHY needs to be configured */
695static u32 num_rc_on;
696
697/* global lock for PCIe common PHY */
698static struct mutex com_phy_lock;
699
700/* Table to track info of PCIe devices */
701static struct msm_pcie_device_info
702 msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];
703
/* PCIe driver state shared across all root complexes */
struct pcie_drv_sta {
	u32 rc_num;		/* number of RCs probed so far */
	struct mutex drv_lock;	/* serializes driver-wide operations */
} pcie_drv;
709
710/* msm pcie device data */
711static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
712
/* regulators: {hdl, name, max_v(uV), min_v(uV), opt_mode(uA), required} */
static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
	{NULL, "vreg-3.3", 0, 0, 0, false},
	{NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
	{NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
	{NULL, "vreg-cx", 0, 0, 0, false}
};
720
/* GPIOs: {name, num, out, on, init, required} */
static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
	{"perst-gpio", 0, 1, 0, 0, 1},
	{"wake-gpio", 0, 0, 0, 0, 0},
	{"qcom,ep-gpio", 0, 1, 1, 0, 0}
};
727
/* resets, per RC: {hdl, name, required}; none are mandatory */
static struct msm_pcie_reset_info_t
msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_0_phy_reset", false}
	},
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_1_phy_reset", false}
	},
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_2_phy_reset", false}
	}
};
750
/* pipe reset, per RC: {hdl, name, required} */
static struct msm_pcie_reset_info_t
msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
	{
		{NULL, "pcie_0_phy_pipe_reset", false}
	},
	{
		{NULL, "pcie_1_phy_pipe_reset", false}
	},
	{
		{NULL, "pcie_2_phy_pipe_reset", false}
	}
};
764
/* clocks, per RC: {hdl, name, freq(Hz), config_mem, required} */
static struct msm_pcie_clk_info_t
	msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
	{
	{NULL, "pcie_0_ref_clk_src", 0, false, false},
	{NULL, "pcie_0_aux_clk", 1010000, false, true},
	{NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_0_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_0_slv_axi_clk", 0, true, true},
	{NULL, "pcie_0_ldo", 0, false, true},
	{NULL, "pcie_0_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	},
	{
	{NULL, "pcie_1_ref_clk_src", 0, false, false},
	{NULL, "pcie_1_aux_clk", 1010000, false, true},
	{NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_1_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_1_slv_axi_clk", 0, true, true},
	{NULL, "pcie_1_ldo", 0, false, true},
	{NULL, "pcie_1_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	},
	{
	{NULL, "pcie_2_ref_clk_src", 0, false, false},
	{NULL, "pcie_2_aux_clk", 1010000, false, true},
	{NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_2_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_2_slv_axi_clk", 0, true, true},
	{NULL, "pcie_2_ldo", 0, false, true},
	{NULL, "pcie_2_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	}
};
802
/* Pipe Clocks, per RC: {hdl, name, freq(Hz), config_mem, required} */
static struct msm_pcie_clk_info_t
	msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
	{
	{NULL, "pcie_0_pipe_clk", 125000000, true, true},
	},
	{
	{NULL, "pcie_1_pipe_clk", 125000000, true, true},
	},
	{
	{NULL, "pcie_2_pipe_clk", 125000000, true, true},
	}
};
816
/* resources: names match enum msm_pcie_res order; bases filled at probe */
static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
	{"parf", 0, 0},
	{"phy", 0, 0},
	{"dm_core", 0, 0},
	{"elbi", 0, 0},
	{"conf", 0, 0},
	{"io", 0, 0},
	{"bars", 0, 0},
	{"tcsr", 0, 0}
};
828
/* irqs: names match enum msm_pcie_irq order; numbers resolved at probe */
static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
	{"int_msi", 0},
	{"int_a", 0},
	{"int_b", 0},
	{"int_c", 0},
	{"int_d", 0},
	{"int_pls_pme", 0},
	{"int_pme_legacy", 0},
	{"int_pls_err", 0},
	{"int_aer_legacy", 0},
	{"int_pls_link_up", 0},
	{"int_pls_link_down", 0},
	{"int_bridge_flush_n", 0},
	{"int_global_int", 0}
};
845
/* MSIs: one platform IRQ per MSI vector when GIC-routed MSIs are used */
static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
	{"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
	{"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
	{"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
	{"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
	{"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
	{"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
	{"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
	{"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
};
857
858#ifdef CONFIG_ARM
859#define PCIE_BUS_PRIV_DATA(bus) \
860 (((struct pci_sys_data *)bus->sysdata)->private_data)
861
862static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];
863
864static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
865{
866 msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
867 msm_pcie_sys_data[dev->rc_idx].private_data = dev;
868
869 return &msm_pcie_sys_data[dev->rc_idx];
870}
871
/*
 * Assign legacy INTx IRQs to enumerated devices using the standard
 * swizzle and the DT interrupt map (ARM-only helper).
 */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
}
876#else
877#define PCIE_BUS_PRIV_DATA(bus) \
878 (struct msm_pcie_dev_t *)(bus->sysdata)
879
/* Non-ARM: the controller struct itself is used directly as bus sysdata. */
static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	return dev;
}
884
/* Non-ARM: no legacy IRQ fixup needed; intentionally a no-op. */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
}
888#endif
889
/*
 * Write a 32-bit value to base+offset, then barrier so the write is
 * posted to the device before any subsequent dependent access.
 */
static inline void msm_pcie_write_reg(void *base, u32 offset, u32 value)
{
	writel_relaxed(value, base + offset);
	/* ensure that changes propagated to the hardware */
	wmb();
}
896
897static inline void msm_pcie_write_reg_field(void *base, u32 offset,
898 const u32 mask, u32 val)
899{
900 u32 shift = find_first_bit((void *)&mask, 32);
901 u32 tmp = readl_relaxed(base + offset);
902
903 tmp &= ~mask; /* clear written bits */
904 val = tmp | (val << shift);
905 writel_relaxed(val, base + offset);
906 /* ensure that changes propagated to the hardware */
907 wmb();
908}
909
/*
 * Mark a clock's core and peripheral memories as non-retained
 * (CLKFLAG_NORETAIN_MEM/PERIPH) so they power-collapse with the clock.
 * Failures are logged but not fatal; called for clocks with
 * config_mem set in their table entry.
 */
static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
	struct msm_pcie_clk_info_t *info)
{
	int ret;

	ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
	if (ret)
		PCIE_ERR(dev,
			"PCIe: RC%d can't configure core memory for clk %s: %d.\n",
			dev->rc_idx, info->name, ret);
	else
		PCIE_DBG2(dev,
			"PCIe: RC%d configured core memory for clk %s.\n",
			dev->rc_idx, info->name);

	ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
	if (ret)
		PCIE_ERR(dev,
			"PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
			dev->rc_idx, info->name, ret);
	else
		PCIE_DBG2(dev,
			"PCIe: RC%d configured peripheral memory for clk %s.\n",
			dev->rc_idx, info->name);
}
935
936#if defined(CONFIG_ARCH_FSM9010)
937#define PCIE20_PARF_PHY_STTS 0x3c
938#define PCIE2_PHY_RESET_CTRL 0x44
939#define PCIE20_PARF_PHY_REFCLK_CTRL2 0xa0
940#define PCIE20_PARF_PHY_REFCLK_CTRL3 0xa4
941#define PCIE20_PARF_PCS_SWING_CTRL1 0x88
942#define PCIE20_PARF_PCS_SWING_CTRL2 0x8c
943#define PCIE20_PARF_PCS_DEEMPH1 0x74
944#define PCIE20_PARF_PCS_DEEMPH2 0x78
945#define PCIE20_PARF_PCS_DEEMPH3 0x7c
946#define PCIE20_PARF_CONFIGBITS 0x84
947#define PCIE20_PARF_PHY_CTRL3 0x94
948#define PCIE20_PARF_PCS_CTRL 0x80
949
950#define TX_AMP_VAL 127
951#define PHY_RX0_EQ_GEN1_VAL 0
952#define PHY_RX0_EQ_GEN2_VAL 4
953#define TX_DEEMPH_GEN1_VAL 24
954#define TX_DEEMPH_GEN2_3_5DB_VAL 24
955#define TX_DEEMPH_GEN2_6DB_VAL 34
956#define PHY_TX0_TERM_OFFST_VAL 0
957
/* FSM9010: PHY register dump not implemented; intentionally a no-op. */
static inline void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
}
961
962static inline void pcie20_phy_reset(struct msm_pcie_dev_t *dev, uint32_t assert)
963{
964 msm_pcie_write_reg_field(dev->phy, PCIE2_PHY_RESET_CTRL,
965 BIT(0), (assert) ? 1 : 0);
966}
967
/*
 * One-time bring-up of the 28LP SNS PHY (100MHz reference clock).
 * Programs Tx amplitude, de-emphasis, Rx equalization and the REF_CLK
 * source while the PHY is held in software reset, then releases reset.
 * Each field is written only if it does not already hold the target
 * value, to avoid redundant MMIO writes.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d: Initializing 28LP SNS phy - 100MHz\n",
		dev->rc_idx);

	/* Assert Phy SW Reset (held through the programming below) */
	pcie20_phy_reset(dev, 1);

	/* Program SSP ENABLE */
	if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL2) & BIT(0))
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2,
			BIT(0), 0);
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL3) &
		BIT(0)) == 0)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL3,
			BIT(0), 1);
	/* Program Tx Amplitude (7-bit field, bits [6:0]) */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL1) &
		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			TX_AMP_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL1,
			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_AMP_VAL);
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL2) &
		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			TX_AMP_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL2,
			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_AMP_VAL);
	/* Program De-Emphasis (6-bit fields: gen2 -6dB, gen2 -3.5dB, gen1) */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH1) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			TX_DEEMPH_GEN2_6DB_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH1,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN2_6DB_VAL);

	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH2) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			TX_DEEMPH_GEN2_3_5DB_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH2,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN2_3_5DB_VAL);

	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH3) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			TX_DEEMPH_GEN1_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH3,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN1_VAL);

	/* Program Rx_Eq (3-bit field) */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_CONFIGBITS) &
		(BIT(2)|BIT(1)|BIT(0))) != PHY_RX0_EQ_GEN1_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_CONFIGBITS,
			BIT(2)|BIT(1)|BIT(0), PHY_RX0_EQ_GEN1_VAL);

	/* Program Tx0_term_offset (5-bit field) */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_CTRL3) &
		(BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
			PHY_TX0_TERM_OFFST_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_CTRL3,
			BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			PHY_TX0_TERM_OFFST_VAL);

	/* Program REF_CLK source: BIT(1)=1 selects the external refclk */
	msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2, BIT(1),
		(dev->ext_ref_clk) ? 1 : 0);
	/* disable Tx2Rx Loopback */
	if (readl_relaxed(dev->phy + PCIE20_PARF_PCS_CTRL) & BIT(1))
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_CTRL,
			BIT(1), 0);
	/* De-assert Phy SW Reset */
	pcie20_phy_reset(dev, 0);
}
1043
1044static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1045{
1046
1047 /* read PCIE20_PARF_PHY_STTS twice */
1048 readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS);
1049 if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS) & BIT(0))
1050 return false;
1051 else
1052 return true;
1053}
1054#else
/*
 * Select one PHY testbus configuration by writing the four TEST_CONTROL
 * registers, then log the selection plus the four DEBUG_BUS_*_STATUS
 * registers it exposes. Helper for pcie_phy_dump().
 */
static void pcie_phy_dump_test_cntrl(struct msm_pcie_dev_t *dev,
					u32 cntrl4_val, u32 cntrl5_val,
					u32 cntrl6_val, u32 cntrl7_val)
{
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL4(dev->rc_idx, dev->common_phy), cntrl4_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL5(dev->rc_idx, dev->common_phy), cntrl5_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL6(dev->rc_idx, dev->common_phy), cntrl6_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL7(dev->rc_idx, dev->common_phy), cntrl7_val);

	/* Read back what was selected, then the resulting debug bus state */
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL4: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL4(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL5: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL5(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL6: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL6(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL7: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL7(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_0_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_1_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_2_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_3_STATUS(dev->rc_idx,
				dev->common_phy)));
}
1109
/*
 * Dump the QMP PHY state for debugging: walks the PCS testbus, the
 * per-lane TX debug bus, the MISC debug bus (PHY v1.x only), the
 * QSERDES common block debug bus, the common-PHY debug bus (if this
 * target shares one PHY across RCs), and finally a raw hex dump of the
 * whole PHY register space. Not supported on PHY v2.0+.
 *
 * Every selector register written here is restored to 0 afterwards so
 * the dump leaves no testbus routing behind.
 */
static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;
	u32 write_val;

	if (dev->phy_ver >= 0x20) {
		PCIE_DUMP(dev, "PCIe: RC%d PHY dump is not supported\n",
			dev->rc_idx);
		return;
	}

	PCIE_DUMP(dev, "PCIe: RC%d PHY testbus\n", dev->rc_idx);

	pcie_phy_dump_test_cntrl(dev, 0x18, 0x19, 0x1A, 0x1B);
	pcie_phy_dump_test_cntrl(dev, 0x1C, 0x1D, 0x1E, 0x1F);
	pcie_phy_dump_test_cntrl(dev, 0x20, 0x21, 0x22, 0x23);

	/* Per-lane TX debug bus: selectors 1..3 */
	for (i = 0; i < 3; i++) {
		write_val = 0x1 + i;
		msm_pcie_write_reg(dev->phy,
			QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
				dev->common_phy), write_val);
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_TX_N_DEBUG_BUS_SEL: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
					dev->common_phy)));

		pcie_phy_dump_test_cntrl(dev, 0x30, 0x31, 0x32, 0x33);
	}

	/* Reset testbus selection */
	pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);

	/* MISC debug bus exists only on PHY v1.x */
	if (dev->phy_ver >= 0x10 && dev->phy_ver < 0x20) {
		pcie_phy_dump_test_cntrl(dev, 0x01, 0x02, 0x03, 0x0A);
		pcie_phy_dump_test_cntrl(dev, 0x0E, 0x0F, 0x12, 0x13);
		pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);

		/* Two passes over the MISC debug bus: indices 1..4, 5..8 */
		for (i = 0; i < 8; i += 4) {
			write_val = 0x1 + i;
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(dev->rc_idx,
					dev->common_phy), write_val);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 1);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 2);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 3);

			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_0_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_0_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_1_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_1_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_2_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_2_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_3_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_3_STATUS(
						dev->rc_idx, dev->common_phy)));
		}

		/* Clear MISC debug bus index selectors */
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
				dev->rc_idx, dev->common_phy), 0);
	}

	/* QSERDES common block debug bus: selectors 2 and 3 */
	for (i = 0; i < 2; i++) {
		write_val = 0x2 + i;

		msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL,
			write_val);

		PCIE_DUMP(dev,
			"PCIe: RC%d to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS0: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS1: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS2: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS3: 0x%x\n\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3));
	}

	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0);

	/* Common PHY debug bus, only on targets sharing one PHY */
	if (dev->common_phy) {
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
			0x01);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE1_INDEX,
			0x02);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE2_INDEX,
			0x03);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE3_INDEX,
			0x04);

		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE1_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE2_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE3_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_0_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_1_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_1_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_2_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_2_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_3_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_3_STATUS));

		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
			0x05);

		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_0_STATUS));
	}

	/* Raw hex dump of the full PHY register space, 8 words per line */
	size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->phy + i),
			readl_relaxed(dev->phy + (i + 4)),
			readl_relaxed(dev->phy + (i + 8)),
			readl_relaxed(dev->phy + (i + 12)),
			readl_relaxed(dev->phy + (i + 16)),
			readl_relaxed(dev->phy + (i + 20)),
			readl_relaxed(dev->phy + (i + 24)),
			readl_relaxed(dev->phy + (i + 28)));
	}
}
1339
1340#ifdef CONFIG_ARCH_MDMCALIFORNIUM
/*
 * MDM 14nm QMP PHY bring-up (19.2MHz refclk, common-mode clock, SSC on).
 * Sequence: hold PCS in SW reset and power the block up, program the
 * QSERDES common PLL/clock registers, per-lane TX/RX tuning, PCS timing
 * and de-emphasis registers, then release SW reset and start the PHY.
 * The write order follows the hardware programming sequence and must
 * not be rearranged.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	u8 common_phy;

	PCIE_DBG(dev,
		"RC%d: Initializing MDM 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
		dev->rc_idx);

	if (dev->common_phy)
		common_phy = 1;
	else
		common_phy = 0;

	/* Hold PCS in SW reset, then enable power to the PHY block */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x03);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy), 0x06);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);

	/*
	 * Pick the sysclk enable select based on the board clock scheme
	 * reported by the TCSR register, when one is mapped.
	 */
	if (dev->tcsr) {
		PCIE_DBG(dev, "RC%d: TCSR PHY clock scheme is 0x%x\n",
			dev->rc_idx, readl_relaxed(dev->tcsr));

		if (readl_relaxed(dev->tcsr) & (BIT(1) | BIT(0)))
			msm_pcie_write_reg(dev->phy,
				QSERDES_COM_SYSCLK_EN_SEL, 0x0A);
		else
			msm_pcie_write_reg(dev->phy,
				QSERDES_COM_SYSCLK_EN_SEL, 0x04);
	}

	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x0D);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x04);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);

	/* Spread-spectrum clocking parameters */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
		common_phy), 0x45);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_RES_CODE_LANE_OFFSET(dev->rc_idx, common_phy),
		0x02);
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_RCV_DETECT_LVL_2(dev->rc_idx, common_phy),
		0x12);

	/* RX signal-detect and equalizer tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
		0x1C);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
		0x14);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
		0xDB);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
		common_phy),
		0x4B);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
		0x04);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);

	/* PCS timing, wakeup delays and de-emphasis */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_OSC_DTCT_ACTIONS(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
		0x73);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
		0x99);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TXDEEMPH_M6DB_V0(dev->rc_idx, common_phy),
		0x15);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
		0x0E);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_SIGDET_CNTRL(dev->rc_idx, common_phy),
		0x07);

	/* Release SW reset and start the PHY state machine */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
		0x03);
}
1501
/*
 * Intentional no-op on MDMCALIFORNIUM: the full port configuration is
 * done inside pcie_phy_init() above, so no separate PCS port init step
 * is needed. Kept so common code can call it unconditionally.
 */
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
}
1505
1506static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1507{
1508 if (readl_relaxed(dev->phy +
1509 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) & BIT(6))
1510 return false;
1511 else
1512 return true;
1513}
1514#else
/*
 * 14nm QMP PHY bring-up (19.2MHz refclk, common-mode clock, SSC on).
 * If the device tree supplied a PHY programming sequence
 * (dev->phy_sequence), replay it verbatim and return; otherwise fall
 * back to the hard-coded register sequence below. The write order
 * follows the hardware programming guide and must not be rearranged.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	int i;
	struct msm_pcie_phy_info_t *phy_seq;

	PCIE_DBG(dev,
		"RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
		dev->rc_idx);

	/* Device-tree-provided sequence takes precedence over defaults */
	if (dev->phy_sequence) {
		i = dev->phy_len;
		phy_seq = dev->phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
		return;
	}

	if (dev->common_phy)
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0x01);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1C);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x42);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x04);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
	/* Spread-spectrum clocking parameters */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);

	/* PHY v3 overrides for two of the registers programmed above */
	if (dev->phy_ver == 0x3) {
		msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
		msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x40);
	}

	/* On a shared PHY, release reset and start the common block */
	if (dev->common_phy) {
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x00);
		msm_pcie_write_reg(dev->phy, PCIE_COM_START_CONTROL, 0x03);
	}
}
1596
/*
 * Per-port PCS/lane configuration for the 14nm QMP PHY, run after
 * pcie_phy_init(). Skipped entirely on PHY v2.0+. A device-tree
 * sequence (dev->port_phy_sequence), when present, is replayed verbatim
 * instead of the hard-coded defaults. Ends by powering the port up,
 * waiting the mandated power-down-exit delay, releasing SW reset and
 * starting the port.
 */
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
	int i;
	struct msm_pcie_phy_info_t *phy_seq;
	u8 common_phy;

	if (dev->phy_ver >= 0x20)
		return;

	PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);

	if (dev->common_phy)
		common_phy = 1;
	else
		common_phy = 0;

	/* Device-tree-provided sequence takes precedence over defaults */
	if (dev->port_phy_sequence) {
		i = dev->port_phy_len;
		phy_seq = dev->port_phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
		return;
	}

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
		common_phy), 0x45);
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy),
		0x06);

	/* RX signal-detect and equalizer tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
		0x1C);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
		0x17);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
		0xDB);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_BAND(dev->rc_idx, common_phy),
		0x18);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_RX_IDLE_DTCT_CNTRL(dev->rc_idx, common_phy),
		0x4C);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
		0x05);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
		common_phy), 0x4B);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
		0x14);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
		0x05);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x02);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_STATE_CONFIG4(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_STATE_CONFIG1(dev->rc_idx, common_phy),
		0xA3);

	/* PHY v3 overrides for sigdet level and gen2 de-emphasis */
	if (dev->phy_ver == 0x3) {
		msm_pcie_write_reg(dev->phy,
			QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
			0x19);

		msm_pcie_write_reg(dev->phy,
			PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
			0x0E);
	}

	/* Power the port up and wait the required settling time */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x03);
	usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);

	/* Release SW reset and start the port */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
		0x0A);
}
1713
1714static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1715{
1716 if (dev->phy_ver >= 0x20) {
1717 if (readl_relaxed(dev->phy +
1718 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) &
1719 BIT(6))
1720 return false;
1721 else
1722 return true;
1723 }
1724
1725 if (!(readl_relaxed(dev->phy + PCIE_COM_PCS_READY_STATUS) & 0x1))
1726 return false;
1727 else
1728 return true;
1729}
1730#endif
1731#endif
1732
1733static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
1734{
1735 int ret, scm_ret;
1736
1737 if (!dev) {
1738 pr_err("PCIe: the input pcie dev is NULL.\n");
1739 return -ENODEV;
1740 }
1741
1742 ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
1743 if (ret || scm_ret) {
1744 PCIE_ERR(dev,
1745 "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
1746 dev->rc_idx, ret, scm_ret);
1747 return ret ? ret : -EINVAL;
1748 }
1749
1750 return 0;
1751}
1752
1753static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
1754 u32 offset)
1755{
1756 if (offset % 4) {
1757 PCIE_ERR(dev,
1758 "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
1759 dev->rc_idx, offset);
1760 return MSM_PCIE_ERROR;
1761 }
1762
1763 return 0;
1764}
1765
/*
 * Confirm the PCIe link is really up.
 *
 * @check_sw_stts: also require the driver's own link_status to be
 *                 MSM_PCIE_LINK_ENABLED.
 * @check_ep:      additionally read the endpoint's config space.
 * @ep_conf:       mapped endpoint config space (used when @check_ep).
 *
 * Checks, in order: the driver state (optional), the link-up bit
 * (BIT(29) at RC config offset 0x80), the RC vendor/device ID word, and
 * optionally the EP vendor/device ID word. A read of all-ones
 * (PCIE_LINK_DOWN) means the bus is not responding. Returns true only
 * if every enabled check passes.
 */
static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
						bool check_sw_stts,
						bool check_ep,
						void __iomem *ep_conf)
{
	u32 val;

	if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
			dev->rc_idx);
		return false;
	}

	/* BIT(29) of the link status/control word reports link up */
	if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
			dev->rc_idx);
		return false;
	}

	/* Offset 0: vendor/device ID; all-ones means no device response */
	val = readl_relaxed(dev->dm_core);
	PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
		dev->rc_idx, val);
	if (val == PCIE_LINK_DOWN) {
		PCIE_ERR(dev,
			"PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
			dev->rc_idx, dev->rc_idx, val);
		return false;
	}

	if (check_ep) {
		val = readl_relaxed(ep_conf);
		PCIE_DBG(dev,
			"PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
			dev->rc_idx, val);
		if (val == PCIE_LINK_DOWN) {
			PCIE_ERR(dev,
				"PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
				dev->rc_idx, dev->rc_idx, val);
			return false;
		}
	}

	return true;
}
1810
/*
 * Restore shadowed config-space registers after the link comes back.
 *
 * @rc: true to restore the root complex itself (one pass over
 *      dev->rc_shadow, loop breaks after the first iteration); false to
 *      restore every tracked endpoint in dev->pcidev_table from its
 *      ep_shadow, skipping endpoints whose link is not confirmed up.
 *
 * Registers are written back in descending dword order; entries holding
 * the PCIE_CLEAR sentinel were never shadowed and are skipped.
 */
static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
{
	int i, j;
	u32 val = 0;
	u32 *shadow;
	void *cfg = dev->conf;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		/* EP table is packed: first zero bdf marks the end */
		if (!rc && !dev->pcidev_table[i].bdf)
			break;
		if (rc) {
			cfg = dev->dm_core;
			shadow = dev->rc_shadow;
		} else {
			if (!msm_pcie_confirm_linkup(dev, false, true,
				dev->pcidev_table[i].conf_base))
				continue;

			shadow = dev->ep_shadow[i];
			PCIE_DBG(dev,
				"PCIe Device: %02x:%02x.%01x\n",
				dev->pcidev_table[i].bdf >> 24,
				dev->pcidev_table[i].bdf >> 19 & 0x1f,
				dev->pcidev_table[i].bdf >> 16 & 0x07);
		}
		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
			val = shadow[j];
			if (val != PCIE_CLEAR) {
				PCIE_DBG3(dev,
					"PCIe: before recovery:cfg 0x%x:0x%x\n",
					j * 4, readl_relaxed(cfg + j * 4));
				PCIE_DBG3(dev,
					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
					j, j * 4, val);
				writel_relaxed(val, cfg + j * 4);
				/* ensure changes propagated to the hardware */
				wmb();
				PCIE_DBG3(dev,
					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
					j * 4, readl_relaxed(cfg + j * 4));
			}
		}
		if (rc)
			break;

		/* Keep the PCI core's saved state in sync with what we wrote */
		pci_save_state(dev->pcidev_table[i].dev);
		cfg += SZ_4K;
	}
}
1860
1861static void msm_pcie_write_mask(void __iomem *addr,
1862 uint32_t clear_mask, uint32_t set_mask)
1863{
1864 uint32_t val;
1865
1866 val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
1867 writel_relaxed(val, addr);
1868 wmb(); /* ensure data is written to hardware register */
1869}
1870
1871static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
1872{
1873 int i, size;
1874 u32 original;
1875
1876 PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);
1877
1878 original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
1879 for (i = 1; i <= 0x1A; i++) {
1880 msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
1881 0xFF0000, i << 16);
1882 PCIE_DUMP(dev,
1883 "RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
1884 dev->rc_idx,
1885 readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
1886 readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
1887 }
1888 writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);
1889
1890 PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);
1891
1892 size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
1893 for (i = 0; i < size; i += 32) {
1894 PCIE_DUMP(dev,
1895 "RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1896 dev->rc_idx, i,
1897 readl_relaxed(dev->parf + i),
1898 readl_relaxed(dev->parf + (i + 4)),
1899 readl_relaxed(dev->parf + (i + 8)),
1900 readl_relaxed(dev->parf + (i + 12)),
1901 readl_relaxed(dev->parf + (i + 16)),
1902 readl_relaxed(dev->parf + (i + 20)),
1903 readl_relaxed(dev->parf + (i + 24)),
1904 readl_relaxed(dev->parf + (i + 28)));
1905 }
1906}
1907
/*
 * msm_pcie_show_status() - dump the current software state of one RC.
 * @dev: the PCIe root-complex instance to report on.
 *
 * Emits every tunable flag, counter and cached configuration value of
 * the controller through PCIE_DBG_FS (debugfs/IPC log).  Read-only:
 * no hardware register is touched here.
 */
static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
		dev->rc_idx, dev->enumerated ? "" : "not");
	PCIE_DBG_FS(dev, "PCIe: link is %s\n",
		(dev->link_status == MSM_PCIE_LINK_ENABLED)
		? "enabled" : "disabled");
	PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
		dev->cfg_access ? "" : "not");
	PCIE_DBG_FS(dev, "use_msi is %d\n",
		dev->use_msi);
	PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
		dev->use_pinctrl);
	PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
		dev->use_19p2mhz_aux_clk);
	PCIE_DBG_FS(dev, "user_suspend is %d\n",
		dev->user_suspend);
	PCIE_DBG_FS(dev, "num_ep: %d\n",
		dev->num_ep);
	PCIE_DBG_FS(dev, "num_active_ep: %d\n",
		dev->num_active_ep);
	PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
		dev->pending_ep_reg ? "true" : "false");
	PCIE_DBG_FS(dev, "phy_len is %d",
		dev->phy_len);
	PCIE_DBG_FS(dev, "port_phy_len is %d",
		dev->port_phy_len);
	PCIE_DBG_FS(dev, "disable_pc is %d",
		dev->disable_pc);
	PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
		dev->l0s_supported ? "" : "not");
	PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
		dev->l1_supported ? "" : "not");
	PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
		dev->l1ss_supported ? "" : "not");
	PCIE_DBG_FS(dev, "common_clk_en is %d\n",
		dev->common_clk_en);
	PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
		dev->clk_power_manage_en);
	PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
		dev->aux_clk_sync);
	PCIE_DBG_FS(dev, "AER is %s enable\n",
		dev->aer_enable ? "" : "not");
	PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
		dev->ext_ref_clk);
	PCIE_DBG_FS(dev, "ep_wakeirq is %d\n",
		dev->ep_wakeirq);
	PCIE_DBG_FS(dev, "phy_ver is %d\n",
		dev->phy_ver);
	PCIE_DBG_FS(dev, "drv_ready is %d\n",
		dev->drv_ready);
	PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
		dev->linkdown_panic);
	PCIE_DBG_FS(dev, "the link is %s suspending\n",
		dev->suspending ? "" : "not");
	PCIE_DBG_FS(dev, "shadow is %s enabled\n",
		dev->shadow_en ? "" : "not");
	PCIE_DBG_FS(dev, "the power of RC is %s on\n",
		dev->power_on ? "" : "not");
	PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
		dev->msi_gicm_addr);
	PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
		dev->msi_gicm_base);
	PCIE_DBG_FS(dev, "bus_client: %d\n",
		dev->bus_client);
	PCIE_DBG_FS(dev, "current short bdf: %d\n",
		dev->current_short_bdf);
	PCIE_DBG_FS(dev, "smmu does %s exist\n",
		dev->smmu_exist ? "" : "not");
	PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
		dev->smmu_sid_base);
	PCIE_DBG_FS(dev, "n_fts: %d\n",
		dev->n_fts);
	PCIE_DBG_FS(dev, "common_phy: %d\n",
		dev->common_phy);
	PCIE_DBG_FS(dev, "ep_latency: %dms\n",
		dev->ep_latency);
	PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
		dev->wr_halt_size);
	PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
		dev->cpl_timeout);
	PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
		dev->current_bdf);
	PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
		dev->perst_delay_us_min);
	PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
		dev->perst_delay_us_max);
	PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
		dev->tlp_rd_size);
	PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
		dev->rc_corr_counter);
	PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
		dev->rc_non_fatal_counter);
	PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
		dev->rc_fatal_counter);
	PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
		dev->ep_corr_counter);
	PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
		dev->ep_non_fatal_counter);
	PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
		dev->ep_fatal_counter);
	PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
		dev->linkdown_counter);
	PCIE_DBG_FS(dev, "wake_counter: %lu\n",
		dev->wake_counter);
	PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
		dev->link_turned_on_counter);
	PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
		dev->link_turned_off_counter);
}
2018
2019static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
2020{
2021 int i, j;
2022 u32 val = 0;
2023 u32 *shadow;
2024
2025 for (i = 0; i < MAX_DEVICE_NUM; i++) {
2026 if (!rc && !dev->pcidev_table[i].bdf)
2027 break;
2028 if (rc) {
2029 shadow = dev->rc_shadow;
2030 } else {
2031 shadow = dev->ep_shadow[i];
2032 PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
2033 dev->pcidev_table[i].bdf >> 24,
2034 dev->pcidev_table[i].bdf >> 19 & 0x1f,
2035 dev->pcidev_table[i].bdf >> 16 & 0x07);
2036 }
2037 for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
2038 val = shadow[j];
2039 if (val != PCIE_CLEAR) {
2040 PCIE_DBG_FS(dev,
2041 "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
2042 j, j * 4, val);
2043 }
2044 }
2045 if (rc)
2046 break;
2047 }
2048}
2049
/*
 * msm_pcie_sel_debug_testcase() - execute one debugfs-selected test case
 * against a root complex.
 * @dev:      the RC to operate on.
 * @testcase: test number (0..13); anything else is rejected.
 *
 * Cases 5-10 first walk the endpoint's standard capability list to find
 * the PCIe capability so the EP's link-control and device-control-2
 * registers can be mirrored alongside the RC's.  Cases 12/13 rely on the
 * file-scope debugfs knobs base_sel/wr_offset/wr_mask/wr_value.
 * NOTE(review): the EP config space (dev->conf) is read unconditionally
 * for cases 5-10 — callers are expected to ensure the link is up.
 */
static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
					u32 testcase)
{
	int ret, i;
	u32 base_sel_size = 0;
	u32 val = 0;
	u32 current_offset = 0;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	if (testcase >= 5 && testcase <= 10) {
		/*
		 * Walk the EP's legacy capability chain: each entry is
		 * [next_ptr:8 | cap_id:8]; stop on the PCIe capability.
		 */
		current_offset =
			readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xff) == PCIE20_CAP_ID) {
				/* +0x10: Link Control/Status */
				ep_link_ctrlstts_offset = current_offset +
								0x10;
				/* +0x28: Device Control 2/Status 2 */
				ep_dev_ctrl2stts2_offset = current_offset +
								0x28;
				break;
			}
			current_offset = (val >> 8) & 0xff;
		}

		if (!ep_link_ctrlstts_offset)
			PCIE_DBG(dev,
				"RC%d endpoint does not support PCIe capability registers\n",
				dev->rc_idx);
		else
			PCIE_DBG(dev,
				"RC%d: ep_link_ctrlstts_offset: 0x%x\n",
				dev->rc_idx, ep_link_ctrlstts_offset);
	}

	switch (testcase) {
	case 0: /* output status */
		PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
			dev->rc_idx);
		msm_pcie_show_status(dev);
		break;
	case 1: /* disable link */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
				__func__);
		break;
	case 2: /* enable link and recover config space for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: enable link and recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 3: /*
		 * disable and enable link, recover config space for
		 * RC and EP
		 */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 4: /* dump shadow registers for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping RC shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, true);

		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping EP shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, false);
		break;
	case 5: /* disable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
			dev->rc_idx);
		/* clear ASPM L0s enable (bit 0) on both RC and EP */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(0), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(0), 0);
		if (dev->shadow_en) {
			/* keep the restore-on-resume copies in sync */
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 6: /* enable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
			dev->rc_idx);
		/* set ASPM L0s enable (bit 0) on both RC and EP */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(0));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 7: /* disable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
			dev->rc_idx);
		/* clear ASPM L1 enable (bit 1) on both RC and EP */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(1), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(1), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 8: /* enable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
			dev->rc_idx);
		/* set ASPM L1 enable (bit 1) on both RC and EP */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(1));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 9: /* disable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
			dev->rc_idx);
		/*
		 * Walk the extended capability chain: each header is
		 * [next_ptr:12 | version:4 | cap_id:16]; find L1 PM
		 * Substates, whose Control 1 register is at +0x8.
		 */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
				dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* clear the four L1ss enable bits and LTR (bit 10) */
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_L1SUB_CONTROL1,
					0xf, 0);
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					BIT(10), 0);
		msm_pcie_write_reg_field(dev->conf,
					ep_l1sub_ctrl1_offset,
					0xf, 0);
		msm_pcie_write_mask(dev->conf +
					ep_dev_ctrl2stts2_offset,
					BIT(10), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 10: /* enable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
			dev->rc_idx);
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				/* +0x4: L1ss Capabilities register */
				ep_l1sub_cap_reg1_offset =
						current_offset + 0x4;
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		val = readl_relaxed(dev->conf +
				ep_l1sub_cap_reg1_offset);

		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
			val);
		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* enable only the substates the EP advertises support for */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_L1SUB_CONTROL1,
					0xf, val);
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0, BIT(10));
		msm_pcie_write_reg_field(dev->conf,
					ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf +
					ep_dev_ctrl2stts2_offset,
					0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 11: /* enumerate PCIe */
		PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
			dev->rc_idx);
		if (dev->enumerated)
			PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
				dev->rc_idx);
		else {
			if (!msm_pcie_enumerate(dev->rc_idx))
				PCIE_DBG_FS(dev,
					"PCIe: RC%d is successfully enumerated\n",
					dev->rc_idx);
			else
				PCIE_DBG_FS(dev,
					"PCIe: RC%d enumeration failed\n",
					dev->rc_idx);
		}
		break;
	case 12: /* write a value to a register */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: writing a value to a register\n\n",
			dev->rc_idx);

		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		}

		PCIE_DBG_FS(dev,
			"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
			dev->res[base_sel - 1].name,
			dev->res[base_sel - 1].base,
			wr_offset, wr_mask, wr_value);

		base_sel_size = resource_size(dev->res[base_sel - 1].resource);

		/* reject offsets that would read/write past the region */
		if (wr_offset >  base_sel_size - 4 ||
			msm_pcie_check_align(dev, wr_offset))
			PCIE_DBG_FS(dev,
				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
				dev->rc_idx, wr_offset, base_sel_size - 4);
		else
			msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
				wr_offset, wr_mask, wr_value);

		break;
	case 13: /* dump all registers of base_sel */
		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
			pcie_parf_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
			pcie_phy_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
			/* only the first 4KB of config space is dumped */
			base_sel_size = 0x1000;
		} else {
			base_sel_size = resource_size(
				dev->res[base_sel - 1].resource);
		}

		PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
			dev->res[base_sel - 1].name, dev->rc_idx);

		/* eight 32-bit words per output line */
		for (i = 0; i < base_sel_size; i += 32) {
			PCIE_DBG_FS(dev,
			"0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			i, readl_relaxed(dev->res[base_sel - 1].base + i),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
		}
		break;
	default:
		PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
		break;
	}
}
2471
2472int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
2473 u32 offset, u32 mask, u32 value)
2474{
2475 int ret = 0;
2476 struct msm_pcie_dev_t *pdev = NULL;
2477
2478 if (!dev) {
2479 pr_err("PCIe: the input pci dev is NULL.\n");
2480 return -ENODEV;
2481 }
2482
2483 if (option == 12 || option == 13) {
2484 if (!base || base > 5) {
2485 PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
2486 PCIE_DBG_FS(pdev,
2487 "PCIe: base_sel is still 0x%x\n", base_sel);
2488 return -EINVAL;
2489 }
2490
2491 base_sel = base;
2492 PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
2493
2494 if (option == 12) {
2495 wr_offset = offset;
2496 wr_mask = mask;
2497 wr_value = value;
2498
2499 PCIE_DBG_FS(pdev,
2500 "PCIe: wr_offset is now 0x%x\n", wr_offset);
2501 PCIE_DBG_FS(pdev,
2502 "PCIe: wr_mask is now 0x%x\n", wr_mask);
2503 PCIE_DBG_FS(pdev,
2504 "PCIe: wr_value is now 0x%x\n", wr_value);
2505 }
2506 }
2507
2508 pdev = PCIE_BUS_PRIV_DATA(dev->bus);
2509 rc_sel = 1 << pdev->rc_idx;
2510
2511 msm_pcie_sel_debug_testcase(pdev, option);
2512
2513 return ret;
2514}
2515EXPORT_SYMBOL(msm_pcie_debug_info);
2516
Tony Truongbd9a3412017-02-27 18:30:13 -08002517#ifdef CONFIG_SYSFS
2518static ssize_t msm_pcie_enumerate_store(struct device *dev,
2519 struct device_attribute *attr,
2520 const char *buf, size_t count)
2521{
2522 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
2523 dev_get_drvdata(dev);
2524
2525 if (pcie_dev)
2526 msm_pcie_enumerate(pcie_dev->rc_idx);
2527
2528 return count;
2529}
2530
2531static DEVICE_ATTR(enumerate, 0200, NULL, msm_pcie_enumerate_store);
2532
2533static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
2534{
2535 int ret;
2536
2537 ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
2538 if (ret)
2539 PCIE_DBG_FS(dev,
2540 "RC%d: failed to create sysfs enumerate node\n",
2541 dev->rc_idx);
2542}
2543
2544static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
2545{
2546 if (dev->pdev)
2547 device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
2548}
2549#else
/* CONFIG_SYSFS disabled: no-op stubs so callers need no #ifdefs. */
static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
{
}

static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
{
}
2557#endif
2558
Tony Truong349ee492014-10-01 17:35:56 -07002559#ifdef CONFIG_DEBUG_FS
/* debugfs directory (pci-msm) and the control files created under it */
static struct dentry *dent_msm_pcie;
static struct dentry *dfile_rc_sel;
static struct dentry *dfile_case;
static struct dentry *dfile_base_sel;
static struct dentry *dfile_linkdown_panic;
static struct dentry *dfile_wr_offset;
static struct dentry *dfile_wr_mask;
static struct dentry *dfile_wr_value;
static struct dentry *dfile_ep_wakeirq;
static struct dentry *dfile_aer_enable;
static struct dentry *dfile_corr_counter_limit;

/* bitmask covering every valid RC; set to (1 << MAX_RC_NUM) - 1 at init */
static u32 rc_sel_max;
2573
/*
 * debugfs 'case' write handler: parse a decimal test case number from
 * userspace and run it on every RC selected in rc_sel (default: RC0).
 */
static ssize_t msm_pcie_cmd_debug(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long ret;
	char str[MAX_MSG_LEN];
	unsigned int testcase = 0;
	int i;
	/* copy no more than the user wrote and no more than str holds */
	u32 size = sizeof(str) < count ? sizeof(str) : count;

	memset(str, 0, size);
	ret = copy_from_user(str, buf, size);
	if (ret)
		return -EFAULT;

	/* accumulate leading decimal digits; stop at first non-digit */
	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
		testcase = (testcase * 10) + (str[i] - '0');

	if (!rc_sel)
		rc_sel = 1;

	pr_alert("PCIe: TEST: %d\n", testcase);

	for (i = 0; i < MAX_RC_NUM; i++) {
		if (!((rc_sel >> i) & 0x1))
			continue;
		msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
	}

	return count;
}

const struct file_operations msm_pcie_cmd_debug_ops = {
	.write = msm_pcie_cmd_debug,
};
2609
/*
 * debugfs 'rc_sel' write handler: parse a decimal RC bitmask from
 * userspace and select which root complexes later test cases act on.
 * Invalid values (0 or above rc_sel_max) leave rc_sel unchanged.
 */
static ssize_t msm_pcie_set_rc_sel(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long ret;
	char str[MAX_MSG_LEN];
	int i;
	u32 new_rc_sel = 0;
	/* copy no more than the user wrote and no more than str holds */
	u32 size = sizeof(str) < count ? sizeof(str) : count;

	memset(str, 0, size);
	ret = copy_from_user(str, buf, size);
	if (ret)
		return -EFAULT;

	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
		new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');

	if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
		pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
		pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
	} else {
		rc_sel = new_rc_sel;
		pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
	}

	/* rc_sel == 0 means "default to RC0 only" */
	pr_alert("PCIe: the following RC(s) will be tested:\n");
	for (i = 0; i < MAX_RC_NUM; i++) {
		if (!rc_sel) {
			pr_alert("RC %d\n", i);
			break;
		} else if (rc_sel & (1 << i)) {
			pr_alert("RC %d\n", i);
		}
	}

	return count;
}

const struct file_operations msm_pcie_rc_sel_ops = {
	.write = msm_pcie_set_rc_sel,
};
2652
/*
 * debugfs 'base_sel' write handler: select which register region
 * (1=PARF, 2=PHY, 3=RC config, 4=ELBI, 5=EP config) test cases 12/13
 * operate on.  Out-of-range values leave base_sel unchanged.
 */
static ssize_t msm_pcie_set_base_sel(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long ret;
	char str[MAX_MSG_LEN];
	int i;
	u32 new_base_sel = 0;
	char *base_sel_name;
	/* copy no more than the user wrote and no more than str holds */
	u32 size = sizeof(str) < count ? sizeof(str) : count;

	memset(str, 0, size);
	ret = copy_from_user(str, buf, size);
	if (ret)
		return -EFAULT;

	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
		new_base_sel = (new_base_sel * 10) + (str[i] - '0');

	if (!new_base_sel || new_base_sel > 5) {
		pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
			new_base_sel);
		pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
	} else {
		base_sel = new_base_sel;
		pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
	}

	/* echo the human-readable name of the selected region */
	switch (base_sel) {
	case 1:
		base_sel_name = "PARF";
		break;
	case 2:
		base_sel_name = "PHY";
		break;
	case 3:
		base_sel_name = "RC CONFIG SPACE";
		break;
	case 4:
		base_sel_name = "ELBI";
		break;
	case 5:
		base_sel_name = "EP CONFIG SPACE";
		break;
	default:
		base_sel_name = "INVALID";
		break;
	}

	pr_alert("%s\n", base_sel_name);

	return count;
}

const struct file_operations msm_pcie_base_sel_ops = {
	.write = msm_pcie_set_base_sel,
};
2710
2711static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
2712 const char __user *buf,
2713 size_t count, loff_t *ppos)
2714{
2715 unsigned long ret;
2716 char str[MAX_MSG_LEN];
2717 u32 new_linkdown_panic = 0;
2718 int i;
2719
2720 memset(str, 0, sizeof(str));
2721 ret = copy_from_user(str, buf, sizeof(str));
2722 if (ret)
2723 return -EFAULT;
2724
2725 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2726 new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
2727
2728 if (new_linkdown_panic <= 1) {
2729 for (i = 0; i < MAX_RC_NUM; i++) {
2730 if (!rc_sel) {
2731 msm_pcie_dev[0].linkdown_panic =
2732 new_linkdown_panic;
2733 PCIE_DBG_FS(&msm_pcie_dev[0],
2734 "PCIe: RC0: linkdown_panic is now %d\n",
2735 msm_pcie_dev[0].linkdown_panic);
2736 break;
2737 } else if (rc_sel & (1 << i)) {
2738 msm_pcie_dev[i].linkdown_panic =
2739 new_linkdown_panic;
2740 PCIE_DBG_FS(&msm_pcie_dev[i],
2741 "PCIe: RC%d: linkdown_panic is now %d\n",
2742 i, msm_pcie_dev[i].linkdown_panic);
2743 }
2744 }
2745 } else {
2746 pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
2747 new_linkdown_panic);
2748 }
2749
2750 return count;
2751}
2752
2753const struct file_operations msm_pcie_linkdown_panic_ops = {
2754 .write = msm_pcie_set_linkdown_panic,
2755};
2756
/*
 * debugfs 'wr_offset' write handler for test case 12.
 * NOTE(review): input is parsed as DECIMAL but reported in hex — the
 * same convention as the other wr_* knobs.
 */
static ssize_t msm_pcie_set_wr_offset(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long ret;
	char str[MAX_MSG_LEN];
	int i;
	/* copy no more than the user wrote and no more than str holds */
	u32 size = sizeof(str) < count ? sizeof(str) : count;

	memset(str, 0, size);
	ret = copy_from_user(str, buf, size);
	if (ret)
		return -EFAULT;

	wr_offset = 0;
	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
		wr_offset = (wr_offset * 10) + (str[i] - '0');

	pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);

	return count;
}

const struct file_operations msm_pcie_wr_offset_ops = {
	.write = msm_pcie_set_wr_offset,
};
2783
/*
 * debugfs 'wr_mask' write handler for test case 12 (decimal in,
 * reported in hex).
 */
static ssize_t msm_pcie_set_wr_mask(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long ret;
	char str[MAX_MSG_LEN];
	int i;
	/* copy no more than the user wrote and no more than str holds */
	u32 size = sizeof(str) < count ? sizeof(str) : count;

	memset(str, 0, size);
	ret = copy_from_user(str, buf, size);
	if (ret)
		return -EFAULT;

	wr_mask = 0;
	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
		wr_mask = (wr_mask * 10) + (str[i] - '0');

	pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);

	return count;
}

const struct file_operations msm_pcie_wr_mask_ops = {
	.write = msm_pcie_set_wr_mask,
};
/*
 * debugfs 'wr_value' write handler for test case 12 (decimal in,
 * reported in hex).
 */
static ssize_t msm_pcie_set_wr_value(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long ret;
	char str[MAX_MSG_LEN];
	int i;
	/* copy no more than the user wrote and no more than str holds */
	u32 size = sizeof(str) < count ? sizeof(str) : count;

	memset(str, 0, size);
	ret = copy_from_user(str, buf, size);
	if (ret)
		return -EFAULT;

	wr_value = 0;
	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
		wr_value = (wr_value * 10) + (str[i] - '0');

	pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);

	return count;
}

const struct file_operations msm_pcie_wr_value_ops = {
	.write = msm_pcie_set_wr_value,
};
2836
2837static ssize_t msm_pcie_set_ep_wakeirq(struct file *file,
2838 const char __user *buf,
2839 size_t count, loff_t *ppos)
2840{
2841 unsigned long ret;
2842 char str[MAX_MSG_LEN];
2843 u32 new_ep_wakeirq = 0;
2844 int i;
2845
2846 memset(str, 0, sizeof(str));
2847 ret = copy_from_user(str, buf, sizeof(str));
2848 if (ret)
2849 return -EFAULT;
2850
2851 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2852 new_ep_wakeirq = (new_ep_wakeirq * 10) + (str[i] - '0');
2853
2854 if (new_ep_wakeirq <= 1) {
2855 for (i = 0; i < MAX_RC_NUM; i++) {
2856 if (!rc_sel) {
2857 msm_pcie_dev[0].ep_wakeirq = new_ep_wakeirq;
2858 PCIE_DBG_FS(&msm_pcie_dev[0],
2859 "PCIe: RC0: ep_wakeirq is now %d\n",
2860 msm_pcie_dev[0].ep_wakeirq);
2861 break;
2862 } else if (rc_sel & (1 << i)) {
2863 msm_pcie_dev[i].ep_wakeirq = new_ep_wakeirq;
2864 PCIE_DBG_FS(&msm_pcie_dev[i],
2865 "PCIe: RC%d: ep_wakeirq is now %d\n",
2866 i, msm_pcie_dev[i].ep_wakeirq);
2867 }
2868 }
2869 } else {
2870 pr_err("PCIe: Invalid input for ep_wakeirq: %d. Please enter 0 or 1.\n",
2871 new_ep_wakeirq);
2872 }
2873
2874 return count;
2875}
2876
2877const struct file_operations msm_pcie_ep_wakeirq_ops = {
2878 .write = msm_pcie_set_ep_wakeirq,
2879};
2880
2881static ssize_t msm_pcie_set_aer_enable(struct file *file,
2882 const char __user *buf,
2883 size_t count, loff_t *ppos)
2884{
2885 unsigned long ret;
2886 char str[MAX_MSG_LEN];
2887 u32 new_aer_enable = 0;
2888 u32 temp_rc_sel;
2889 int i;
2890
2891 memset(str, 0, sizeof(str));
2892 ret = copy_from_user(str, buf, sizeof(str));
2893 if (ret)
2894 return -EFAULT;
2895
2896 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2897 new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
2898
2899 if (new_aer_enable > 1) {
2900 pr_err(
2901 "PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
2902 new_aer_enable);
2903 return count;
2904 }
2905
2906 if (rc_sel)
2907 temp_rc_sel = rc_sel;
2908 else
2909 temp_rc_sel = 0x1;
2910
2911 for (i = 0; i < MAX_RC_NUM; i++) {
2912 if (temp_rc_sel & (1 << i)) {
2913 msm_pcie_dev[i].aer_enable = new_aer_enable;
2914 PCIE_DBG_FS(&msm_pcie_dev[i],
2915 "PCIe: RC%d: aer_enable is now %d\n",
2916 i, msm_pcie_dev[i].aer_enable);
2917
2918 msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
2919 PCIE20_BRIDGE_CTRL,
2920 new_aer_enable ? 0 : BIT(16),
2921 new_aer_enable ? BIT(16) : 0);
2922
2923 PCIE_DBG_FS(&msm_pcie_dev[i],
2924 "RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
2925 readl_relaxed(msm_pcie_dev[i].dm_core +
2926 PCIE20_BRIDGE_CTRL));
2927 }
2928 }
2929
2930 return count;
2931}
2932
2933const struct file_operations msm_pcie_aer_enable_ops = {
2934 .write = msm_pcie_set_aer_enable,
2935};
2936
2937static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
2938 const char __user *buf,
2939 size_t count, loff_t *ppos)
2940{
2941 unsigned long ret;
2942 char str[MAX_MSG_LEN];
2943 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002944 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002945
Tony Truongfdbd5672017-01-06 16:23:14 -08002946 memset(str, 0, size);
2947 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002948 if (ret)
2949 return -EFAULT;
2950
2951 corr_counter_limit = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002952 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002953 corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
2954
2955 pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
2956
2957 return count;
2958}
2959
/* debugfs "corr_counter_limit" file: sets correctable-error report limit */
const struct file_operations msm_pcie_corr_counter_limit_ops = {
	.write = msm_pcie_set_corr_counter_limit,
};
2963
/*
 * msm_pcie_debugfs_init - create the "pci-msm" debugfs directory and its
 * control files (rc_sel, case, base_sel, ...).
 *
 * Debugfs is optional: any failure logs an error, unwinds everything
 * created so far via the goto ladder at the bottom, and returns silently.
 */
static void msm_pcie_debugfs_init(void)
{
	rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
	wr_mask = 0xffffffff;

	dent_msm_pcie = debugfs_create_dir("pci-msm", 0);
	if (IS_ERR(dent_msm_pcie)) {
		pr_err("PCIe: fail to create the folder for debug_fs.\n");
		return;
	}

	dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
					dent_msm_pcie, 0,
					&msm_pcie_rc_sel_ops);
	if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
		pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
		goto rc_sel_error;
	}

	dfile_case = debugfs_create_file("case", 0664,
					dent_msm_pcie, 0,
					&msm_pcie_cmd_debug_ops);
	if (!dfile_case || IS_ERR(dfile_case)) {
		pr_err("PCIe: fail to create the file for debug_fs case.\n");
		goto case_error;
	}

	dfile_base_sel = debugfs_create_file("base_sel", 0664,
					dent_msm_pcie, 0,
					&msm_pcie_base_sel_ops);
	if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
		pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
		goto base_sel_error;
	}

	/* linkdown_panic is 0644 (root-write only), unlike the 0664 files */
	dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
					dent_msm_pcie, 0,
					&msm_pcie_linkdown_panic_ops);
	if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
		pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
		goto linkdown_panic_error;
	}

	dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
					dent_msm_pcie, 0,
					&msm_pcie_wr_offset_ops);
	if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
		goto wr_offset_error;
	}

	dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
					dent_msm_pcie, 0,
					&msm_pcie_wr_mask_ops);
	if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
		goto wr_mask_error;
	}

	dfile_wr_value = debugfs_create_file("wr_value", 0664,
					dent_msm_pcie, 0,
					&msm_pcie_wr_value_ops);
	if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
		goto wr_value_error;
	}

	dfile_ep_wakeirq = debugfs_create_file("ep_wakeirq", 0664,
					dent_msm_pcie, 0,
					&msm_pcie_ep_wakeirq_ops);
	if (!dfile_ep_wakeirq || IS_ERR(dfile_ep_wakeirq)) {
		pr_err("PCIe: fail to create the file for debug_fs ep_wakeirq.\n");
		goto ep_wakeirq_error;
	}

	dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
					dent_msm_pcie, 0,
					&msm_pcie_aer_enable_ops);
	if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
		pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
		goto aer_enable_error;
	}

	dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
					0664, dent_msm_pcie, 0,
					&msm_pcie_corr_counter_limit_ops);
	if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
		pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
		goto corr_counter_limit_error;
	}
	return;

	/* unwind in reverse creation order; each label removes the file
	 * created immediately before the failing step.
	 */
corr_counter_limit_error:
	debugfs_remove(dfile_aer_enable);
aer_enable_error:
	debugfs_remove(dfile_ep_wakeirq);
ep_wakeirq_error:
	debugfs_remove(dfile_wr_value);
wr_value_error:
	debugfs_remove(dfile_wr_mask);
wr_mask_error:
	debugfs_remove(dfile_wr_offset);
wr_offset_error:
	debugfs_remove(dfile_linkdown_panic);
linkdown_panic_error:
	debugfs_remove(dfile_base_sel);
base_sel_error:
	debugfs_remove(dfile_case);
case_error:
	debugfs_remove(dfile_rc_sel);
rc_sel_error:
	debugfs_remove(dent_msm_pcie);
}
3077
3078static void msm_pcie_debugfs_exit(void)
3079{
3080 debugfs_remove(dfile_rc_sel);
3081 debugfs_remove(dfile_case);
3082 debugfs_remove(dfile_base_sel);
3083 debugfs_remove(dfile_linkdown_panic);
3084 debugfs_remove(dfile_wr_offset);
3085 debugfs_remove(dfile_wr_mask);
3086 debugfs_remove(dfile_wr_value);
3087 debugfs_remove(dfile_ep_wakeirq);
3088 debugfs_remove(dfile_aer_enable);
3089 debugfs_remove(dfile_corr_counter_limit);
3090}
#else
/* Debugfs support compiled out: no-op stubs so callers need no #ifdefs. */
static void msm_pcie_debugfs_init(void)
{
}

static void msm_pcie_debugfs_exit(void)
{
}
#endif
3100
3101static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
3102{
3103 return readl_relaxed(dev->dm_core +
3104 PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
3105}
3106
/**
 * msm_pcie_iatu_config - configure outbound address translation region
 * @dev: root complex
 * @nr: region number
 * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
 * @host_addr: - region start address on host
 * @host_end: - region end address (low 32 bit) on host,
 *	upper 32 bits are same as for @host_addr
 * @target_addr: - region start address on target
 *
 * Programs one iATU viewport. The register writes are ordered with
 * explicit wmb() barriers: the region is disabled before its bounds are
 * changed and re-enabled (CTRL2 BIT(31)) only afterwards.
 */
static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
				unsigned long host_addr, u32 host_end,
				unsigned long target_addr)
{
	void __iomem *pcie20 = dev->dm_core;

	/* mirror the programmed values so they can be replayed on recovery */
	if (dev->shadow_en) {
		dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
			nr;
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
			type;
		dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
			lower_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
			upper_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
			host_end;
		dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
			lower_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
			upper_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
			BIT(31);
	}

	/* select region */
	writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
	/* ensure that hardware locks it */
	wmb();

	/* switch off region before changing it */
	writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
	/* and wait till it propagates to the hardware */
	wmb();

	writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
	writel_relaxed(lower_32_bits(host_addr),
		       pcie20 + PCIE20_PLR_IATU_LBAR);
	writel_relaxed(upper_32_bits(host_addr),
		       pcie20 + PCIE20_PLR_IATU_UBAR);
	writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
	writel_relaxed(lower_32_bits(target_addr),
		       pcie20 + PCIE20_PLR_IATU_LTAR);
	writel_relaxed(upper_32_bits(target_addr),
		       pcie20 + PCIE20_PLR_IATU_UTAR);
	/* ensure that changes propagated to the hardware */
	wmb();
	/* re-enable the region only after all bounds are in place */
	writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);

	/* ensure that changes propagated to the hardware */
	wmb();

	if (dev->enumerated) {
		PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
			dev->pcidev_table[nr].bdf >> 24,
			dev->pcidev_table[nr].bdf >> 19 & 0x1f,
			dev->pcidev_table[nr].bdf >> 16 & 0x07);
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
	}
}
3192
3193/**
3194 * msm_pcie_cfg_bdf - configure for config access
3195 * @dev: root commpex
3196 * @bus: PCI bus number
3197 * @devfn: PCI dev and function number
3198 *
3199 * Remap if required region 0 for config access of proper type
3200 * (CFG0 for bus 1, CFG1 for other buses)
3201 * Cache current device bdf for speed-up
3202 */
3203static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
3204{
3205 struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
3206 u32 bdf = BDF_OFFSET(bus, devfn);
3207 u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3208
3209 if (dev->current_bdf == bdf)
3210 return;
3211
3212 msm_pcie_iatu_config(dev, 0, type,
3213 axi_conf->start,
3214 axi_conf->start + SZ_4K - 1,
3215 bdf);
3216
3217 dev->current_bdf = bdf;
3218}
3219
/*
 * msm_pcie_save_shadow - record a config-space write in the shadow copy
 * @dev: root complex the write was issued on
 * @word_offset: word-aligned config-space offset that was written
 * @wr_val: value that was written
 * @bdf: target bus/device/function in BDF_OFFSET() encoding
 * @rc: true when the write targeted the root complex itself
 *
 * The shadow is used to restore config space after link recovery.
 * Endpoint entries are lazily allocated: the first write seen from an
 * unknown @bdf claims the first free slot in both the per-RC table and
 * the global msm_pcie_dev_tbl. A second per-RC slot (i > 0) implies a
 * bridge/switch below the RC, so bridge_found is latched.
 */
static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
					u32 word_offset, u32 wr_val,
					u32 bdf, bool rc)
{
	int i, j;
	u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;

	if (rc) {
		dev->rc_shadow[word_offset / 4] = wr_val;
	} else {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			/* free slot: claim it for this BDF in both tables */
			if (!dev->pcidev_table[i].bdf) {
				for (j = 0; j < max_dev; j++)
					if (!msm_pcie_dev_tbl[j].bdf) {
						msm_pcie_dev_tbl[j].bdf = bdf;
						break;
					}
				dev->pcidev_table[i].bdf = bdf;
				if ((!dev->bridge_found) && (i > 0))
					dev->bridge_found = true;
			}
			if (dev->pcidev_table[i].bdf == bdf) {
				dev->ep_shadow[i][word_offset / 4] = wr_val;
				break;
			}
		}
	}
}
3248
/*
 * msm_pcie_oper_conf - common backend for config-space reads and writes
 * @bus: PCI bus being accessed
 * @devfn: device/function on @bus
 * @oper: RD or WR
 * @where: byte offset into config space
 * @size: access width in bytes (1, 2 or 4)
 * @val: out-param for reads; in-param for writes
 *
 * Validates that config access is currently allowed (cfg_access flag,
 * link state), resolves the MMIO base for the target, and performs a
 * read-modify-write at word granularity under cfg_lock. Returns 0 or
 * PCIBIOS_DEVICE_NOT_FOUND (with *val forced to all-ones on reads).
 */
static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
				     int where, int size, u32 *val)
{
	uint32_t word_offset, byte_offset, mask;
	uint32_t rd_val, wr_val;
	struct msm_pcie_dev_t *dev;
	void __iomem *config_base;
	bool rc = false;
	u32 rc_idx;
	int rv = 0;
	u32 bdf = BDF_OFFSET(bus->number, devfn);
	int i;

	dev = PCIE_BUS_PRIV_DATA(bus);

	if (!dev) {
		pr_err("PCIe: No device found for this bus.\n");
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto out;
	}

	rc_idx = dev->rc_idx;
	/* bus 0 is the root complex itself */
	rc = (bus->number == 0);

	spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);

	if (!dev->cfg_access) {
		PCIE_DBG3(dev,
			"Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* the RC exposes only function 0 */
	if (rc && (devfn != 0)) {
		PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
			 (oper == RD) ? "rd" : "wr", bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG3(dev,
			"Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* check if the link is up for endpoint */
	if (!rc && !msm_pcie_is_link_up(dev)) {
		PCIE_ERR(dev,
			"PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
				rc_idx, (oper == RD) ? "rd" : "wr",
				bus->number, devfn);
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
	}

	/* before enumeration, region 0 must be retargeted per access */
	if (!rc && !dev->enumerated)
		msm_pcie_cfg_bdf(dev, bus->number, devfn);

	word_offset = where & ~0x3;
	byte_offset = where & 0x3;
	/* byte-lane mask covering 'size' bytes at 'byte_offset' */
	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);

	if (rc || !dev->enumerated) {
		config_base = rc ? dev->dm_core : dev->conf;
	} else {
		/* after enumeration each EP has its own mapped window */
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (dev->pcidev_table[i].bdf == bdf) {
				config_base = dev->pcidev_table[i].conf_base;
				break;
			}
		}
		if (i == MAX_DEVICE_NUM) {
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
		}
	}

	rd_val = readl_relaxed(config_base + word_offset);

	if (oper == RD) {
		*val = ((rd_val & mask) >> (8 * byte_offset));
		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
			rc_idx, bus->number, devfn, where, size, *val, rd_val);
	} else {
		wr_val = (rd_val & ~mask) |
			((*val << (8 * byte_offset)) & mask);

		/* NOTE(review): 0x3c word covers bridge control (0x3e);
		 * 3 << 16 presumably forces its low two enable bits — confirm
		 */
		if ((bus->number == 0) && (where == 0x3c))
			wr_val = wr_val | (3 << 16);

		writel_relaxed(wr_val, config_base + word_offset);
		wmb(); /* ensure config data is written to hardware register */

		if (dev->shadow_en) {
			/* all-FFs readback means the device vanished; don't
			 * poison the shadow with it
			 */
			if (rd_val == PCIE_LINK_DOWN &&
				(readl_relaxed(config_base) == PCIE_LINK_DOWN))
				PCIE_ERR(dev,
					"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
					rc_idx, bus->number, devfn,
					where, size);
			else
				msm_pcie_save_shadow(dev, word_offset, wr_val,
					bdf, rc);
		}

		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
			rc_idx, bus->number, devfn, where, size,
			wr_val, rd_val, *val);
	}

unlock:
	spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
out:
	return rv;
}
3376
3377static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
3378 int size, u32 *val)
3379{
3380 int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
3381
3382 if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
3383 *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
3384 PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
3385 }
3386
3387 return ret;
3388}
3389
3390static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
3391 int where, int size, u32 val)
3392{
3393 return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
3394}
3395
/* config-space accessors handed to the PCI core for this controller */
static struct pci_ops msm_pcie_ops = {
	.read = msm_pcie_rd_conf,
	.write = msm_pcie_wr_conf,
};
3400
3401static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
3402{
3403 int rc = 0, i;
3404 struct msm_pcie_gpio_info_t *info;
3405
3406 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3407
3408 for (i = 0; i < dev->gpio_n; i++) {
3409 info = &dev->gpio[i];
3410
3411 if (!info->num)
3412 continue;
3413
3414 rc = gpio_request(info->num, info->name);
3415 if (rc) {
3416 PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
3417 dev->rc_idx, info->name, rc);
3418 break;
3419 }
3420
3421 if (info->out)
3422 rc = gpio_direction_output(info->num, info->init);
3423 else
3424 rc = gpio_direction_input(info->num);
3425 if (rc) {
3426 PCIE_ERR(dev,
3427 "PCIe: RC%d can't set direction for GPIO %s:%d\n",
3428 dev->rc_idx, info->name, rc);
3429 gpio_free(info->num);
3430 break;
3431 }
3432 }
3433
3434 if (rc)
3435 while (i--)
3436 gpio_free(dev->gpio[i].num);
3437
3438 return rc;
3439}
3440
3441static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
3442{
3443 int i;
3444
3445 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3446
3447 for (i = 0; i < dev->gpio_n; i++)
3448 gpio_free(dev->gpio[i].num);
3449}
3450
/*
 * msm_pcie_vreg_init - set voltage/load and enable every regulator in
 * dev->vreg[] (entries with a NULL handle are skipped).
 *
 * On failure, the regulators enabled so far are unwound in reverse
 * order; for "vreg-cx" the RPM corner vote is also dropped. Returns 0
 * on success or the first regulator error code.
 */
int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct regulator *vreg;
	struct msm_pcie_vreg_info_t *info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		info = &dev->vreg[i];
		vreg = info->hdl;

		if (!vreg)
			continue;

		PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
			dev->rc_idx, info->name);
		/* max_v == 0 means no voltage constraint was provided */
		if (info->max_v) {
			rc = regulator_set_voltage(vreg,
						info->min_v, info->max_v);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set voltage for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		if (info->opt_mode) {
			/* regulator_set_load() returns >= 0 on success */
			rc = regulator_set_load(vreg, info->opt_mode);
			if (rc < 0) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set mode for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		rc = regulator_enable(vreg);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe: RC%d can't enable regulator %s: %d\n",
				dev->rc_idx, info->name, rc);
			break;
		}
	}

	/* unwind: everything before the failing index was fully enabled */
	if (rc)
		while (i--) {
			struct regulator *hdl = dev->vreg[i].hdl;

			if (hdl) {
				regulator_disable(hdl);
				if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
					PCIE_DBG(dev,
						"RC%d: Removing %s vote.\n",
						dev->rc_idx,
						dev->vreg[i].name);
					regulator_set_voltage(hdl,
						RPM_REGULATOR_CORNER_NONE,
						INT_MAX);
				}
			}

		}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
3521
3522static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
3523{
3524 int i;
3525
3526 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3527
3528 for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
3529 if (dev->vreg[i].hdl) {
3530 PCIE_DBG(dev, "Vreg %s is being disabled\n",
3531 dev->vreg[i].name);
3532 regulator_disable(dev->vreg[i].hdl);
3533
3534 if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
3535 PCIE_DBG(dev,
3536 "RC%d: Removing %s vote.\n",
3537 dev->rc_idx,
3538 dev->vreg[i].name);
3539 regulator_set_voltage(dev->vreg[i].hdl,
3540 RPM_REGULATOR_CORNER_NONE,
3541 INT_MAX);
3542 }
3543 }
3544 }
3545
3546 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3547}
3548
3549static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
3550{
3551 int i, rc = 0;
3552 struct msm_pcie_clk_info_t *info;
3553 struct msm_pcie_reset_info_t *reset_info;
3554
3555 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3556
3557 rc = regulator_enable(dev->gdsc);
3558
3559 if (rc) {
3560 PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
3561 dev->rc_idx, dev->pdev->name);
3562 return rc;
3563 }
3564
3565 if (dev->gdsc_smmu) {
3566 rc = regulator_enable(dev->gdsc_smmu);
3567
3568 if (rc) {
3569 PCIE_ERR(dev,
3570 "PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
3571 dev->rc_idx, dev->pdev->name);
3572 return rc;
3573 }
3574 }
3575
3576 PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
3577 if (dev->bus_client) {
3578 rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
3579 if (rc) {
3580 PCIE_ERR(dev,
3581 "PCIe: fail to set bus bandwidth for RC%d:%d.\n",
3582 dev->rc_idx, rc);
3583 return rc;
3584 }
3585
3586 PCIE_DBG2(dev,
3587 "PCIe: set bus bandwidth for RC%d.\n",
3588 dev->rc_idx);
3589 }
3590
3591 for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
3592 info = &dev->clk[i];
3593
3594 if (!info->hdl)
3595 continue;
3596
3597 if (info->config_mem)
3598 msm_pcie_config_clock_mem(dev, info);
3599
3600 if (info->freq) {
3601 rc = clk_set_rate(info->hdl, info->freq);
3602 if (rc) {
3603 PCIE_ERR(dev,
3604 "PCIe: RC%d can't set rate for clk %s: %d.\n",
3605 dev->rc_idx, info->name, rc);
3606 break;
3607 }
3608
3609 PCIE_DBG2(dev,
3610 "PCIe: RC%d set rate for clk %s.\n",
3611 dev->rc_idx, info->name);
3612 }
3613
3614 rc = clk_prepare_enable(info->hdl);
3615
3616 if (rc)
3617 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
3618 dev->rc_idx, info->name);
3619 else
3620 PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
3621 info->name, dev->rc_idx);
3622 }
3623
3624 if (rc) {
3625 PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
3626 dev->rc_idx);
3627 while (i--) {
3628 struct clk *hdl = dev->clk[i].hdl;
3629
3630 if (hdl)
3631 clk_disable_unprepare(hdl);
3632 }
3633
3634 if (dev->gdsc_smmu)
3635 regulator_disable(dev->gdsc_smmu);
3636
3637 regulator_disable(dev->gdsc);
3638 }
3639
3640 for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
3641 reset_info = &dev->reset[i];
3642 if (reset_info->hdl) {
3643 rc = reset_control_deassert(reset_info->hdl);
3644 if (rc)
3645 PCIE_ERR(dev,
3646 "PCIe: RC%d failed to deassert reset for %s.\n",
3647 dev->rc_idx, reset_info->name);
3648 else
3649 PCIE_DBG2(dev,
3650 "PCIe: RC%d successfully deasserted reset for %s.\n",
3651 dev->rc_idx, reset_info->name);
3652 }
3653 }
3654
3655 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3656
3657 return rc;
3658}
3659
3660static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
3661{
3662 int i;
3663 int rc;
3664
3665 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3666
3667 for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
3668 if (dev->clk[i].hdl)
3669 clk_disable_unprepare(dev->clk[i].hdl);
3670
3671 if (dev->bus_client) {
3672 PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
3673 dev->rc_idx);
3674
3675 rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
3676 if (rc)
3677 PCIE_ERR(dev,
3678 "PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
3679 dev->rc_idx, rc);
3680 else
3681 PCIE_DBG(dev,
3682 "PCIe: relinquish bus bandwidth for RC%d.\n",
3683 dev->rc_idx);
3684 }
3685
3686 if (dev->gdsc_smmu)
3687 regulator_disable(dev->gdsc_smmu);
3688
3689 regulator_disable(dev->gdsc);
3690
3691 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3692}
3693
3694static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
3695{
3696 int i, rc = 0;
3697 struct msm_pcie_clk_info_t *info;
3698 struct msm_pcie_reset_info_t *pipe_reset_info;
3699
3700 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3701
3702 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
3703 info = &dev->pipeclk[i];
3704
3705 if (!info->hdl)
3706 continue;
3707
3708
3709 if (info->config_mem)
3710 msm_pcie_config_clock_mem(dev, info);
3711
3712 if (info->freq) {
3713 rc = clk_set_rate(info->hdl, info->freq);
3714 if (rc) {
3715 PCIE_ERR(dev,
3716 "PCIe: RC%d can't set rate for clk %s: %d.\n",
3717 dev->rc_idx, info->name, rc);
3718 break;
3719 }
3720
3721 PCIE_DBG2(dev,
3722 "PCIe: RC%d set rate for clk %s: %d.\n",
3723 dev->rc_idx, info->name, rc);
3724 }
3725
3726 rc = clk_prepare_enable(info->hdl);
3727
3728 if (rc)
3729 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
3730 dev->rc_idx, info->name);
3731 else
3732 PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
3733 dev->rc_idx, info->name);
3734 }
3735
3736 if (rc) {
3737 PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
3738 dev->rc_idx);
3739 while (i--)
3740 if (dev->pipeclk[i].hdl)
3741 clk_disable_unprepare(dev->pipeclk[i].hdl);
3742 }
3743
3744 for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
3745 pipe_reset_info = &dev->pipe_reset[i];
3746 if (pipe_reset_info->hdl) {
3747 rc = reset_control_deassert(
3748 pipe_reset_info->hdl);
3749 if (rc)
3750 PCIE_ERR(dev,
3751 "PCIe: RC%d failed to deassert pipe reset for %s.\n",
3752 dev->rc_idx, pipe_reset_info->name);
3753 else
3754 PCIE_DBG2(dev,
3755 "PCIe: RC%d successfully deasserted pipe reset for %s.\n",
3756 dev->rc_idx, pipe_reset_info->name);
3757 }
3758 }
3759
3760 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3761
3762 return rc;
3763}
3764
3765static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
3766{
3767 int i;
3768
3769 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3770
3771 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
3772 if (dev->pipeclk[i].hdl)
3773 clk_disable_unprepare(
3774 dev->pipeclk[i].hdl);
3775
3776 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3777}
3778
3779static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
3780{
3781 int i;
3782 u8 type;
3783 struct msm_pcie_device_info *dev_table = dev->pcidev_table;
3784
3785 for (i = 0; i < MAX_DEVICE_NUM; i++) {
3786 if (!dev_table[i].bdf)
3787 break;
3788
3789 type = dev_table[i].bdf >> 24 == 0x1 ?
3790 PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3791
3792 msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
3793 dev_table[i].phy_address + SZ_4K - 1,
3794 dev_table[i].bdf);
3795 }
3796}
3797
/*
 * msm_pcie_config_controller - one-time controller programming after the
 * link comes up: iATU regions, N_FTS, AUX clock frequency, completion
 * timeout, AER enable, and SMMU BDF-translation registers.
 */
static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
{
	int i;

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
	 * axi config address range to device config address range
	 */
	if (dev->enumerated) {
		msm_pcie_iatu_config_all_ep(dev);
	} else {
		dev->current_bdf = 0; /* to force IATU re-config */
		msm_pcie_cfg_bdf(dev, 1, 0);
	}

	/* configure N_FTS */
	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
	if (!dev->n_fts)
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
					0, BIT(15));
	else
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
					PCIE20_ACK_N_FTS,
					dev->n_fts << 8);

	if (dev->shadow_en)
		dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
			readl_relaxed(dev->dm_core +
				PCIE20_ACK_F_ASPM_CTRL_REG);

	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));

	/* configure AUX clock frequency register for PCIe core */
	if (dev->use_19p2mhz_aux_clk)
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
	else
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);

	/* configure the completion timeout value for PCIe core */
	if (dev->cpl_timeout && dev->bridge_found)
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0xf, dev->cpl_timeout);

	/* Enable AER on RC */
	if (dev->aer_enable) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
					BIT(16)|BIT(17));
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
					BIT(3)|BIT(2)|BIT(1)|BIT(0));

		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
	}

	/* configure SMMU registers */
	if (dev->smmu_exist) {
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_SID_OFFSET, 0);

		if (dev->enumerated) {
			/* one BDF_TRANSLATE_N slot per enumerated device,
			 * indexed by its short_bdf (4 bytes per slot)
			 */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				if (dev->pcidev_table[i].dev &&
					dev->pcidev_table[i].short_bdf) {
					msm_pcie_write_reg(dev->parf,
						PCIE20_PARF_BDF_TRANSLATE_N +
						dev->pcidev_table[i].short_bdf
						* 4,
						dev->pcidev_table[i].bdf >> 16);
				}
			}
		}
	}
}
3879
/*
 * msm_pcie_config_link_state - negotiate link power management with the
 * endpoint: common clock, clock power management, L0s, L1 and L1SS.
 *
 * Walks the endpoint's legacy capability list for the PCIe capability
 * (link capability/control offsets) and, for L1SS, the extended
 * capability list. Each enabled feature is written to both the RC and
 * EP registers and mirrored into the shadow when shadow_en is set.
 * Returns early (silently) when the EP lacks a needed capability or a
 * capability pointer is misaligned.
 */
static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
{
	u32 val;
	u32 current_offset;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_cap_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	/* Enable the AUX Clock and the Core Clk to be synchronous for L1SS*/
	if (!dev->aux_clk_sync && dev->l1ss_supported)
		msm_pcie_write_mask(dev->parf +
				PCIE20_PARF_SYS_CTRL, BIT(3), 0);

	/* walk the EP's capability list for the PCIe capability */
	current_offset = readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

	while (current_offset) {
		if (msm_pcie_check_align(dev, current_offset))
			return;

		val = readl_relaxed(dev->conf + current_offset);
		if ((val & 0xff) == PCIE20_CAP_ID) {
			/* fixed offsets within the PCIe capability */
			ep_link_cap_offset = current_offset + 0x0c;
			ep_link_ctrlstts_offset = current_offset + 0x10;
			ep_dev_ctrl2stts2_offset = current_offset + 0x28;
			break;
		}
		current_offset = (val >> 8) & 0xff;
	}

	if (!ep_link_cap_offset) {
		PCIE_DBG(dev,
			"RC%d endpoint does not support PCIe capability registers\n",
			dev->rc_idx);
		return;
	}

	PCIE_DBG(dev,
		"RC%d: ep_link_cap_offset: 0x%x\n",
		dev->rc_idx, ep_link_cap_offset);

	/* common clock configuration: BIT(6) on both RC and EP */
	if (dev->common_clk_en) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(6));

		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(6));

		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);

			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}

		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* clock power management: only when the EP advertises it (BIT(18)) */
	if (dev->clk_power_manage_en) {
		val = readl_relaxed(dev->conf + ep_link_cap_offset);
		if (val & BIT(18)) {
			msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
						0, BIT(8));

			if (dev->shadow_en)
				dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
					readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);

			PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset));
		}
	}

	/* L0s: ASPM control BIT(0) on both sides */
	if (dev->l0s_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(0));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
					readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
					readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* L1: ASPM control BIT(1) on both sides */
	if (dev->l1_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(1));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
					readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
					readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	if (dev->l1ss_supported) {
		/* walk the EP's extended capability list for L1SS */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			if (msm_pcie_check_align(dev, current_offset))
				return;

			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset = current_offset + 0x4;
				ep_l1sub_ctrl1_offset = current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG(dev,
				"RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			return;
		}

		val = readl_relaxed(dev->conf + ep_l1sub_cap_reg1_offset);

		PCIE_DBG2(dev, "EP's L1SUB_CAPABILITY_REG_1: 0x%x\n", val);
		PCIE_DBG2(dev, "RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
				dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* enable only the substates the EP advertises (low nibble) */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core, PCIE20_L1SUB_CONTROL1,
					0xf, val);
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0, BIT(10));
		msm_pcie_write_reg_field(dev->conf, ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf + ep_dev_ctrl2stts2_offset,
					0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
					readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
					readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
					readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
					readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG2(dev, "RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1));
		PCIE_DBG2(dev, "RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG2(dev, "EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf + ep_l1sub_ctrl1_offset));
		PCIE_DBG2(dev, "EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
			ep_dev_ctrl2stts2_offset));
	}
}
4067
4068void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
4069{
4070 int i;
4071
4072 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4073
4074 /* program MSI controller and enable all interrupts */
4075 writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
4076 writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
4077
4078 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
4079 writel_relaxed(~0, dev->dm_core +
4080 PCIE20_MSI_CTRL_INTR_EN + (i * 12));
4081
4082 /* ensure that hardware is configured before proceeding */
4083 wmb();
4084}
4085
/*
 * msm_pcie_get_resources() - acquire all DT/platform resources for one RC.
 * @dev:  root-complex driver state to populate
 * @pdev: the platform device carrying the DT node and resources
 *
 * Gathers regulators, GDSC power domains, GPIOs, optional PHY init
 * sequences, clocks (with optional max frequencies), resets, the
 * bus-bandwidth voting client, memory-mapped register regions and
 * IRQ/MSI numbers.  Returns 0 on success or a negative errno; most
 * resources are devm-managed so the core releases them on failure.
 */
static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
					struct platform_device *pdev)
{
	int i, len, cnt, ret = 0, size = 0;
	struct msm_pcie_vreg_info_t *vreg_info;
	struct msm_pcie_gpio_info_t *gpio_info;
	struct msm_pcie_clk_info_t *clk_info;
	struct resource *res;
	struct msm_pcie_res_info_t *res_info;
	struct msm_pcie_irq_info_t *irq_info;
	struct msm_pcie_irq_info_t *msi_info;
	struct msm_pcie_reset_info_t *reset_info;
	struct msm_pcie_reset_info_t *pipe_reset_info;
	char prop_name[MAX_PROP_SIZE];
	const __be32 *prop;
	u32 *clkfreq = NULL;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	/*
	 * Optional per-clock maximum frequencies: one u32 per entry of the
	 * DT "clock-names" list.
	 * NOTE(review): clkfreq is sized for MSM_PCIE_MAX_CLK +
	 * MSM_PCIE_MAX_PIPE_CLK entries but filled with "cnt" values read
	 * from DT; this assumes the DT never lists more clocks than the
	 * driver supports — confirm against the DTS files.
	 */
	cnt = of_property_count_strings((&pdev->dev)->of_node,
			"clock-names");
	if (cnt > 0) {
		clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
					sizeof(*clkfreq), GFP_KERNEL);
		if (!clkfreq) {
			PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
					dev->rc_idx);
			return -ENOMEM;
		}
		ret = of_property_read_u32_array(
			(&pdev->dev)->of_node,
			"max-clock-frequency-hz", clkfreq, cnt);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
				dev->rc_idx, ret);
			goto out;
		}
	}

	/*
	 * Regulators: a missing required vreg (or EPROBE_DEFER) aborts;
	 * optional ones are nulled out.  Voltage corners and load mode come
	 * from the optional "qcom,<name>-voltage-level" triplet.
	 */
	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		vreg_info = &dev->vreg[i];
		vreg_info->hdl =
				devm_regulator_get(&pdev->dev, vreg_info->name);

		if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
			PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
				vreg_info->name);
			ret = PTR_ERR(vreg_info->hdl);
			goto out;
		}

		if (IS_ERR(vreg_info->hdl)) {
			if (vreg_info->required) {
				PCIE_DBG(dev, "Vreg %s doesn't exist\n",
					vreg_info->name);
				ret = PTR_ERR(vreg_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Optional Vreg %s doesn't exist\n",
					vreg_info->name);
				vreg_info->hdl = NULL;
			}
		} else {
			dev->vreg_n++;
			snprintf(prop_name, MAX_PROP_SIZE,
				"qcom,%s-voltage-level", vreg_info->name);
			prop = of_get_property((&pdev->dev)->of_node,
						prop_name, &len);
			if (!prop || (len != (3 * sizeof(__be32)))) {
				PCIE_DBG(dev, "%s %s property\n",
					prop ? "invalid format" :
					"no", prop_name);
			} else {
				vreg_info->max_v = be32_to_cpup(&prop[0]);
				vreg_info->min_v = be32_to_cpup(&prop[1]);
				vreg_info->opt_mode =
					be32_to_cpup(&prop[2]);
			}
		}
	}

	/* Core power domain (GDSC) is mandatory */
	dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");

	if (IS_ERR(dev->gdsc)) {
		PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
			dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
		if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
			PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
					dev->pdev->name);
		ret = PTR_ERR(dev->gdsc);
		goto out;
	}

	/* SMMU power domain is optional */
	dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");

	if (IS_ERR(dev->gdsc_smmu)) {
		PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
			dev->rc_idx);
		dev->gdsc_smmu = NULL;
	}

	/*
	 * GPIOs: a missing required GPIO aborts (ret still carries the
	 * negative gpiolib error at "goto out"); optional ones are skipped.
	 */
	dev->gpio_n = 0;
	for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
		gpio_info = &dev->gpio[i];
		ret = of_get_named_gpio((&pdev->dev)->of_node,
					gpio_info->name, 0);
		if (ret >= 0) {
			gpio_info->num = ret;
			dev->gpio_n++;
			PCIE_DBG(dev, "GPIO num for %s is %d\n",
				gpio_info->name, gpio_info->num);
		} else {
			if (gpio_info->required) {
				PCIE_ERR(dev,
					"Could not get required GPIO %s\n",
					gpio_info->name);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Could not get optional GPIO %s\n",
					gpio_info->name);
			}
		}
		ret = 0;
	}

	/* Optional common-PHY register init sequence from DT */
	of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
	if (size) {
		dev->phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->phy_sequence) {
			dev->phy_len =
				size / sizeof(*dev->phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,phy-sequence",
				(unsigned int *)dev->phy_sequence,
				size / sizeof(dev->phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* Optional per-port PHY register init sequence from DT */
	of_get_property(pdev->dev.of_node, "qcom,port-phy-sequence", &size);
	if (size) {
		dev->port_phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->port_phy_sequence) {
			dev->port_phy_len =
				size / sizeof(*dev->port_phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,port-phy-sequence",
				(unsigned int *)dev->port_phy_sequence,
				size / sizeof(dev->port_phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for port phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: port phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/*
	 * Core clocks; frequencies (if provided) follow the pipe-clock
	 * entries in the clkfreq array, hence the MSM_PCIE_MAX_PIPE_CLK
	 * offset.
	 */
	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
		clk_info = &dev->clk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i +
					MSM_PCIE_MAX_PIPE_CLK];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* Pipe clocks; their frequencies are at the start of clkfreq */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		clk_info = &dev->pipeclk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* Reset controls: required ones abort, optional ones are nulled */
	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
		reset_info = &dev->reset[i];

		reset_info->hdl = devm_reset_control_get(&pdev->dev,
						reset_info->name);

		if (IS_ERR(reset_info->hdl)) {
			if (reset_info->required) {
				PCIE_DBG(dev,
					"Reset %s isn't available:%ld\n",
					reset_info->name,
					PTR_ERR(reset_info->hdl));

				ret = PTR_ERR(reset_info->hdl);
				reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Reset %s\n",
					reset_info->name);
				reset_info->hdl = NULL;
			}
		}
	}

	/* Pipe-clock reset controls, same policy as above */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];

		pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
						pipe_reset_info->name);

		if (IS_ERR(pipe_reset_info->hdl)) {
			if (pipe_reset_info->required) {
				PCIE_DBG(dev,
					"Pipe Reset %s isn't available:%ld\n",
					pipe_reset_info->name,
					PTR_ERR(pipe_reset_info->hdl));

				ret = PTR_ERR(pipe_reset_info->hdl);
				pipe_reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
					pipe_reset_info->name);
				pipe_reset_info->hdl = NULL;
			}
		}
	}

	/* Bus-bandwidth voting client (optional if no scale table in DT) */
	dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (!dev->bus_scale_table) {
		PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
			dev->rc_idx, dev->pdev->name);
		dev->bus_client = 0;
	} else {
		dev->bus_client =
			msm_bus_scale_register_client(dev->bus_scale_table);
		if (!dev->bus_client) {
			PCIE_ERR(dev,
				"PCIe: Failed to register bus client for RC%d (%s)\n",
				dev->rc_idx, dev->pdev->name);
			msm_bus_cl_clear_pdata(dev->bus_scale_table);
			ret = -ENODEV;
			goto out;
		}
	}

	/* Map each named register region; a failed ioremap is fatal */
	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
		res_info = &dev->res[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							res_info->name);

		if (!res) {
			PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
				dev->rc_idx, res_info->name);
		} else {
			PCIE_DBG(dev, "start addr for %s is %pa.\n",
				res_info->name,	&res->start);

			res_info->base = devm_ioremap(&pdev->dev,
						res->start, resource_size(res));
			if (!res_info->base) {
				PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
					dev->rc_idx, res_info->name);
				ret = -ENOMEM;
				goto out;
			} else {
				res_info->resource = res;
			}
		}
	}

	/* Interrupt numbers (all optional; absence is only logged) */
	for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
		irq_info = &dev->irq[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							irq_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, irq_info->name);
		} else {
			irq_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
					irq_info->num);
		}
	}

	/* MSI interrupt numbers (also optional) */
	for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
		msi_info = &dev->msi[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							msi_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, msi_info->name);
		} else {
			msi_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
					msi_info->num);
		}
	}

	/* All allocations succeeded */

	if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
		dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
	else
		dev->wake_n = 0;

	/* Cache convenience pointers to the mapped regions */
	dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
	dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
	dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
	dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
	dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
	dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
	dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
	dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
	dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
	dev->dev_io_res->flags = IORESOURCE_IO;

out:
	/* clkfreq was only needed during parsing; kfree(NULL) is a no-op */
	kfree(clkfreq);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
4464
4465static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
4466{
4467 dev->parf = NULL;
4468 dev->elbi = NULL;
4469 dev->dm_core = NULL;
4470 dev->conf = NULL;
4471 dev->bars = NULL;
4472 dev->tcsr = NULL;
4473 dev->dev_mem_res = NULL;
4474 dev->dev_io_res = NULL;
4475}
4476
/*
 * msm_pcie_enable() - power up and train the PCIe link of one root complex.
 * @dev:     root-complex driver state
 * @options: PM_VREG / PM_CLK / PM_PIPE_CLK bitmask selecting which resource
 *           classes to enable (callers pass PM_ALL for a full bring-up)
 *
 * Sequence: assert PERST, enable rails and clocks, restore secure config,
 * program PARF, init the (possibly shared) PHY, de-assert PERST, start
 * LTSSM and poll for link-up, then configure the controller, MSI block and
 * link power states.  Serialized against msm_pcie_disable() by
 * dev->setup_lock; the shared-PHY refcount is guarded by com_phy_lock.
 *
 * Returns 0 on success, a negative errno on resource/PHY failure, or -1 if
 * link training fails.
 */
int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
{
	int ret = 0;
	uint32_t val;
	long int retries = 0;
	int link_check_count = 0;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
		PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
			dev->rc_idx);
		goto out;
	}

	/* assert PCIe reset link to keep EP in reset */

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
				 PERST_PROPAGATION_DELAY_US_MAX);

	/* enable power */

	if (options & PM_VREG) {
		ret = msm_pcie_vreg_init(dev);
		if (ret)
			goto out;
	}

	/* enable clocks */
	if (options & PM_CLK) {
		ret = msm_pcie_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto clk_fail;
	}

	/* re-apply secure register configuration via SCM if applicable */
	if (dev->scm_dev_id) {
		PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
		msm_pcie_restore_sec_config(dev);
	}

	/* enable PCIe clocks and resets */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);

	/* change DBI base address */
	writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/*
	 * NOTE(review): 0x365E is an undocumented PARF_SYS_CTRL value,
	 * presumably taken from the hardware programming guide — confirm.
	 */
	writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
				0, BIT(4));

	/* enable selected IRQ */
	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
		msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);

		msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
					BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
					BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
					BIT(MSM_PCIE_INT_EVT_AER_ERR) |
					BIT(MSM_PCIE_INT_EVT_MSI_0) |
					BIT(MSM_PCIE_INT_EVT_MSI_1) |
					BIT(MSM_PCIE_INT_EVT_MSI_2) |
					BIT(MSM_PCIE_INT_EVT_MSI_3) |
					BIT(MSM_PCIE_INT_EVT_MSI_4) |
					BIT(MSM_PCIE_INT_EVT_MSI_5) |
					BIT(MSM_PCIE_INT_EVT_MSI_6) |
					BIT(MSM_PCIE_INT_EVT_MSI_7));

		PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
	}

	/* size the slave address space to cover the BAR memory window */
	if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_16M)
		writel_relaxed(SZ_32M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_8M)
		writel_relaxed(SZ_16M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else
		writel_relaxed(SZ_8M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);

	/* halt AXI master writes above the configured boundary when MSI is used */
	if (dev->use_msi) {
		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
		val = dev->wr_halt_size ? dev->wr_halt_size :
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
			BIT(31) | val);

		PCIE_DBG(dev,
			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
			dev->rc_idx,
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
	}

	/* init PCIe PHY — shared across RCs, so only the first RC does it */
	mutex_lock(&com_phy_lock);
	if (!num_rc_on)
		pcie_phy_init(dev);

	num_rc_on++;
	mutex_unlock(&com_phy_lock);

	if (options & PM_PIPE_CLK) {
		usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
					PHY_STABILIZATION_DELAY_US_MAX);
		/* Enable the pipe clock */
		ret = msm_pcie_pipe_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto link_fail;
	}

	PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);

	/* poll the PHY ready status with bounded retries */
	do {
		if (pcie_phy_is_ready(dev))
			break;
		retries++;
		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
					 REFCLK_STABILIZATION_DELAY_US_MAX);
	} while (retries < PHY_READY_TIMEOUT_COUNT);

	PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
		dev->rc_idx, retries);

	if (pcie_phy_is_ready(dev))
		PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
	else {
		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
			dev->rc_idx);
		ret = -ENODEV;
		pcie_phy_dump(dev);
		goto link_fail;
	}

	pcie_pcs_port_phy_init(dev);

	/* optional extra settle time for slow endpoints (ms from DT) */
	if (dev->ep_latency)
		usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				dev->gpio[MSM_PCIE_GPIO_EP].on);

	/* de-assert PCIe reset link to bring EP out of reset */

	PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);

	/* set max tlp read size */
	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
				0x7000, dev->tlp_rd_size);

	/* enable link training */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));

	PCIE_DBG(dev, "%s", "check if link is up\n");

	/* Wait for up to 100ms for the link to come up */
	do {
		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
		val =  readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
		PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
			dev->rc_idx, (val >> 12) & 0x3f);
	} while ((!(val & XMLH_LINK_UP) ||
		!msm_pcie_confirm_linkup(dev, false, false, NULL))
		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));

	if ((val & XMLH_LINK_UP) &&
		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
		PCIE_DBG(dev, "Link is up after %d checkings\n",
			link_check_count);
		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
	} else {
		/* link failed to train: re-assert PERST and unwind */
		PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
			dev->rc_idx);
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
			dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
			dev->rc_idx);
		ret = -1;
		goto link_fail;
	}

	msm_pcie_config_controller(dev);

	/* only program the internal MSI controller when not using QGIC MSIs */
	if (!dev->msi_gicm_addr)
		msm_pcie_config_msi_controller(dev);

	msm_pcie_config_link_state(dev);

	dev->link_status = MSM_PCIE_LINK_ENABLED;
	dev->power_on = true;
	dev->suspending = false;
	dev->link_turned_on_counter++;

	goto out;

link_fail:
	/* unwind in reverse order: EP power, PHY, pipe clocks, clocks */
	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	msm_pcie_pipe_clk_deinit(dev);
	msm_pcie_clk_deinit(dev);
clk_fail:
	msm_pcie_vreg_deinit(dev);
out:
	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
4723
/*
 * msm_pcie_disable() - power down the PCIe link of one root complex.
 * @dev:     root-complex driver state
 * @options: PM_VREG / PM_CLK / PM_PIPE_CLK bitmask selecting which resource
 *           classes to turn off
 *
 * Asserts PERST, resets/powers down the PHY (dropping the shared-PHY
 * refcount under com_phy_lock), then disables clocks, regulators and pipe
 * clocks as selected.  Serialized against msm_pcie_enable() by
 * dev->setup_lock; a no-op if the link is already powered down.
 */
void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
{
	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (!dev->power_on) {
		PCIE_DBG(dev,
			"PCIe: the link of RC%d is already power down.\n",
			dev->rc_idx);
		mutex_unlock(&dev->setup_lock);
		return;
	}

	/* mark the link down before touching the hardware */
	dev->link_status = MSM_PCIE_LINK_DISABLED;
	dev->power_on = false;
	dev->link_turned_off_counter++;

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);

	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);

	/* hold the per-port PHY in reset and power it down */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	/* last RC down also powers off the shared common PHY */
	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	if (options & PM_CLK) {
		/* put the PHY interface back under reset before cutting clocks */
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
					BIT(0));
		msm_pcie_clk_deinit(dev);
	}

	if (options & PM_VREG)
		msm_pcie_vreg_deinit(dev);

	if (options & PM_PIPE_CLK)
		msm_pcie_pipe_clk_deinit(dev);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
4783
4784static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
4785 struct msm_pcie_device_info *ep_dev_info)
4786{
4787 u32 val;
4788 void __iomem *ep_base = ep_dev_info->conf_base;
4789 u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
4790 0xff;
4791
4792 while (current_offset) {
4793 if (msm_pcie_check_align(dev, current_offset))
4794 return;
4795
4796 val = readl_relaxed(ep_base + current_offset);
4797 if ((val & 0xff) == PCIE20_CAP_ID) {
4798 ep_dev_info->dev_ctrlstts_offset =
4799 current_offset + 0x8;
4800 break;
4801 }
4802 current_offset = (val >> 8) & 0xff;
4803 }
4804
4805 if (!ep_dev_info->dev_ctrlstts_offset) {
4806 PCIE_DBG(dev,
4807 "RC%d endpoint does not support PCIe cap registers\n",
4808 dev->rc_idx);
4809 return;
4810 }
4811
4812 PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
4813 dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
4814
4815 /* Enable AER on EP */
4816 msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
4817 BIT(3)|BIT(2)|BIT(1)|BIT(0));
4818
4819 PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
4820 readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
4821}
4822
/*
 * msm_pcie_config_device_table() - bus_for_each_dev() callback that records
 * every non-root-bus PCI device in both the global and the per-RC device
 * tables, programs an iATU window for its config space, enables bus
 * mastering for bridge-type headers, and enables AER on the endpoint.
 *
 * @dev:  the struct device of the PCI device being visited
 * @pdev: opaque pointer to the owning struct msm_pcie_dev_t
 *
 * NOTE(review): on a full table this returns the positive table index as
 * the "error" value rather than a negative errno — callers appear to treat
 * any nonzero return as failure; confirm before changing.
 */
static int msm_pcie_config_device_table(struct device *dev, void *pdev)
{
	struct pci_dev *pcidev = to_pci_dev(dev);
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
	int ret = 0;
	u32 rc_idx = pcie_dev->rc_idx;
	u32 i, index;
	u32 bdf = 0;
	u8 type;
	u32 h_type;
	u32 bme;

	if (!pcidev) {
		PCIE_ERR(pcie_dev,
			"PCIe: Did not find PCI device in list for RC%d.\n",
			pcie_dev->rc_idx);
		return -ENODEV;
	}

	PCIE_DBG(pcie_dev,
		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
		pcidev->vendor, pcidev->device);

	/* the root bus itself (bus 0) needs no table entry */
	if (!pcidev->bus->number)
		return ret;

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
	/* bus 1 is directly behind the RC: use CFG0 (type 0) accesses */
	type = pcidev->bus->number == 1 ?
		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	/* find the pre-populated global slot matching this BDF */
	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
		if (msm_pcie_dev_tbl[i].bdf == bdf &&
			!msm_pcie_dev_tbl[i].dev) {
			/* and the matching per-RC slot */
			for (index = 0; index < MAX_DEVICE_NUM; index++) {
				if (dev_table_t[index].bdf == bdf) {
					msm_pcie_dev_tbl[i].dev = pcidev;
					msm_pcie_dev_tbl[i].domain = rc_idx;
					msm_pcie_dev_tbl[i].conf_base =
						pcie_dev->conf + index * SZ_4K;
					msm_pcie_dev_tbl[i].phy_address =
						axi_conf->start + index * SZ_4K;

					dev_table_t[index].dev = pcidev;
					dev_table_t[index].domain = rc_idx;
					dev_table_t[index].conf_base =
						pcie_dev->conf + index * SZ_4K;
					dev_table_t[index].phy_address =
						axi_conf->start + index * SZ_4K;

					/* map a 4K iATU window onto this BDF's config space */
					msm_pcie_iatu_config(pcie_dev, index,
						type,
						dev_table_t[index].phy_address,
						dev_table_t[index].phy_address
						+ SZ_4K - 1,
						bdf);

					h_type = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_HEADER_TYPE);

					bme = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_COMMAND_STATUS);

					/*
					 * Bit 16 of the dword at 0x0C is bit 0
					 * of the header-type byte — presumably
					 * distinguishing type-1 (bridge)
					 * headers, which get memory-space +
					 * bus-master enabled; others are
					 * counted as endpoints.  Confirm.
					 */
					if (h_type & (1 << 16)) {
						pci_write_config_dword(pcidev,
							PCIE20_COMMAND_STATUS,
							bme | 0x06);
					} else {
						pcie_dev->num_ep++;
						dev_table_t[index].registered =
							false;
					}

					if (pcie_dev->num_ep > 1)
						pcie_dev->pending_ep_reg = true;

					msm_pcie_config_ep_aer(pcie_dev,
						&dev_table_t[index]);

					break;
				}
			}
			if (index == MAX_DEVICE_NUM) {
				PCIE_ERR(pcie_dev,
					"RC%d PCI device table is full.\n",
					rc_idx);
				ret = index;
			} else {
				break;
			}
		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
			pcidev == msm_pcie_dev_tbl[i].dev) {
			/* already recorded on a previous pass */
			break;
		}
	}
	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"Global PCI device table is full: %d elements.\n",
			i);
		PCIE_ERR(pcie_dev,
			"Bus number is 0x%x\nDevice number is 0x%x\n",
			pcidev->bus->number, pcidev->devfn);
		ret = i;
	}
	return ret;
}
4932
/*
 * msm_pcie_configure_sid() - allocate an SMMU stream ID for an endpoint.
 * @dev:    the endpoint's struct device
 * @sid:    out: assigned SMMU SID (smmu_sid_base | rc_idx<<4 | short BDF)
 * @domain: out: the owning root complex index
 *
 * Walks up to the root bus to recover the owning msm_pcie_dev_t, allocates
 * the next short BDF, programs the PARF BDF-translation register for it,
 * and caches the SID in the per-RC device table.  Returns 0 on success or
 * MSM_PCIE_ERROR (the short-BDF allocation is rolled back if the endpoint's
 * BDF is not found in the table).
 */
int msm_pcie_configure_sid(struct device *dev, u32 *sid, int *domain)
{
	struct pci_dev *pcidev;
	struct msm_pcie_dev_t *pcie_dev;
	struct pci_bus *bus;
	int i;
	u32 bdf;

	if (!dev) {
		pr_err("%s: PCIe: endpoint device passed in is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	pcidev = to_pci_dev(dev);
	if (!pcidev) {
		pr_err("%s: PCIe: PCI device of endpoint is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	bus = pcidev->bus;
	if (!bus) {
		pr_err("%s: PCIe: Bus of PCI device is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	/* climb to the root bus; its sysdata is the RC driver state */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	pcie_dev = (struct msm_pcie_dev_t *)(bus->sysdata);
	if (!pcie_dev) {
		pr_err("%s: PCIe: Could not get PCIe structure\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	if (!pcie_dev->smmu_exist) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC:%d: smmu does not exist\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev, "PCIe: RC%d: device address is: %p\n",
		pcie_dev->rc_idx, dev);
	PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device address is: %p\n",
		pcie_dev->rc_idx, pcidev);

	*domain = pcie_dev->rc_idx;

	/* claim the next short BDF slot, if any remain */
	if (pcie_dev->current_short_bdf < (MAX_SHORT_BDF_NUM - 1)) {
		pcie_dev->current_short_bdf++;
	} else {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: No more short BDF left\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (pcie_dev->pcidev_table[i].bdf == bdf) {
			*sid = pcie_dev->smmu_sid_base +
				((pcie_dev->rc_idx << 4) |
				pcie_dev->current_short_bdf);

			/* teach the PARF to translate this full BDF to the short one */
			msm_pcie_write_reg(pcie_dev->parf,
				PCIE20_PARF_BDF_TRANSLATE_N +
				pcie_dev->current_short_bdf * 4,
				bdf >> 16);

			pcie_dev->pcidev_table[i].sid = *sid;
			pcie_dev->pcidev_table[i].short_bdf =
				pcie_dev->current_short_bdf;
			break;
		}
	}

	if (i == MAX_DEVICE_NUM) {
		/* roll back the short-BDF allocation on lookup failure */
		pcie_dev->current_short_bdf--;
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d could not find BDF:%d\n",
			pcie_dev->rc_idx, bdf);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev,
		"PCIe: RC%d: Device: %02x:%02x.%01x received SID %d\n",
		pcie_dev->rc_idx,
		bdf >> 24,
		bdf >> 19 & 0x1f,
		bdf >> 16 & 0x07,
		*sid);

	return 0;
}
EXPORT_SYMBOL(msm_pcie_configure_sid);
5033
5034int msm_pcie_enumerate(u32 rc_idx)
5035{
5036 int ret = 0, bus_ret = 0, scan_ret = 0;
5037 struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
5038
5039 mutex_lock(&dev->enumerate_lock);
5040
5041 PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
5042
5043 if (!dev->drv_ready) {
5044 PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
5045 rc_idx);
5046 ret = -EPROBE_DEFER;
5047 goto out;
5048 }
5049
5050 if (!dev->enumerated) {
5051 ret = msm_pcie_enable(dev, PM_ALL);
5052
5053 /* kick start ARM PCI configuration framework */
5054 if (!ret) {
5055 struct pci_dev *pcidev = NULL;
5056 bool found = false;
5057 struct pci_bus *bus;
5058 resource_size_t iobase = 0;
5059 u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
5060 u32 vendor_id = ids & 0xffff;
5061 u32 device_id = (ids & 0xffff0000) >> 16;
5062 LIST_HEAD(res);
5063
5064 PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
5065 vendor_id, device_id);
5066
5067 ret = of_pci_get_host_bridge_resources(
5068 dev->pdev->dev.of_node,
5069 0, 0xff, &res, &iobase);
5070 if (ret) {
5071 PCIE_ERR(dev,
5072 "PCIe: failed to get host bridge resources for RC%d: %d\n",
5073 dev->rc_idx, ret);
5074 goto out;
5075 }
5076
5077 bus = pci_create_root_bus(&dev->pdev->dev, 0,
5078 &msm_pcie_ops,
5079 msm_pcie_setup_sys_data(dev),
5080 &res);
5081 if (!bus) {
5082 PCIE_ERR(dev,
5083 "PCIe: failed to create root bus for RC%d\n",
5084 dev->rc_idx);
5085 ret = -ENOMEM;
5086 goto out;
5087 }
5088
5089 scan_ret = pci_scan_child_bus(bus);
5090 PCIE_DBG(dev,
5091 "PCIe: RC%d: The max subordinate bus number discovered is %d\n",
5092 dev->rc_idx, ret);
5093
5094 msm_pcie_fixup_irqs(dev);
5095 pci_assign_unassigned_bus_resources(bus);
5096 pci_bus_add_devices(bus);
5097
5098 dev->enumerated = true;
5099
5100 msm_pcie_write_mask(dev->dm_core +
5101 PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
5102
5103 if (dev->cpl_timeout && dev->bridge_found)
5104 msm_pcie_write_reg_field(dev->dm_core,
5105 PCIE20_DEVICE_CONTROL2_STATUS2,
5106 0xf, dev->cpl_timeout);
5107
5108 if (dev->shadow_en) {
5109 u32 val = readl_relaxed(dev->dm_core +
5110 PCIE20_COMMAND_STATUS);
5111 PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
5112 val);
5113 dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
5114 }
5115
5116 do {
5117 pcidev = pci_get_device(vendor_id,
5118 device_id, pcidev);
5119 if (pcidev && (&msm_pcie_dev[rc_idx] ==
5120 (struct msm_pcie_dev_t *)
5121 PCIE_BUS_PRIV_DATA(pcidev->bus))) {
5122 msm_pcie_dev[rc_idx].dev = pcidev;
5123 found = true;
5124 PCIE_DBG(&msm_pcie_dev[rc_idx],
5125 "PCI device is found for RC%d\n",
5126 rc_idx);
5127 }
5128 } while (!found && pcidev);
5129
5130 if (!pcidev) {
5131 PCIE_ERR(dev,
5132 "PCIe: Did not find PCI device for RC%d.\n",
5133 dev->rc_idx);
5134 ret = -ENODEV;
5135 goto out;
5136 }
5137
5138 bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
5139 &msm_pcie_config_device_table);
5140
5141 if (bus_ret) {
5142 PCIE_ERR(dev,
5143 "PCIe: Failed to set up device table for RC%d\n",
5144 dev->rc_idx);
5145 ret = -ENODEV;
5146 goto out;
5147 }
5148 } else {
5149 PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
5150 dev->rc_idx);
5151 }
5152 } else {
5153 PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
5154 dev->rc_idx);
5155 }
5156
5157out:
5158 mutex_unlock(&dev->enumerate_lock);
5159
5160 return ret;
5161}
5162EXPORT_SYMBOL(msm_pcie_enumerate);
5163
5164static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
5165 enum msm_pcie_event event)
5166{
5167 if (dev->event_reg && dev->event_reg->callback &&
5168 (dev->event_reg->events & event)) {
5169 struct msm_pcie_notify *notify = &dev->event_reg->notify;
5170
5171 notify->event = event;
5172 notify->user = dev->event_reg->user;
5173 PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
5174 dev->rc_idx, event);
5175 dev->event_reg->callback(notify);
5176
5177 if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
5178 (event == MSM_PCIE_EVENT_LINKDOWN)) {
5179 dev->user_suspend = true;
5180 PCIE_DBG(dev,
5181 "PCIe: Client of RC%d will recover the link later.\n",
5182 dev->rc_idx);
5183 return;
5184 }
5185 } else {
5186 PCIE_DBG2(dev,
5187 "PCIe: Client of RC%d does not have registration for event %d\n",
5188 dev->rc_idx, event);
5189 }
5190}
5191
/*
 * handle_wake_func() - workqueue handler scheduled from the WAKE# ISR.
 * @work: embedded work_struct inside struct msm_pcie_dev_t.
 *
 * Runs when an endpoint asserts WAKE# before the root complex has been
 * enumerated.  Enumerates the RC and, on success, delivers a LINKUP
 * notification to every client registered for that event.
 */
static void handle_wake_func(struct work_struct *work)
{
	int i, ret;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
					handle_wake_work);

	PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);

	/* Serialize with other link recovery/suspend paths. */
	mutex_lock(&dev->recovery_lock);

	if (!dev->enumerated) {
		PCIE_DBG(dev,
			"PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
			dev->rc_idx);

		ret = msm_pcie_enumerate(dev->rc_idx);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: failed to enable RC%d upon wake request from the device.\n",
				dev->rc_idx);
			goto out;
		}

		if (dev->num_ep > 1) {
			/*
			 * Multi-endpoint RC: walk the device table and fire a
			 * LINKUP callback per registered endpoint client.
			 */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg = dev->pcidev_table[i].event_reg;

				if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
					&& dev->event_reg &&
					dev->event_reg->callback &&
					(dev->event_reg->events &
					MSM_PCIE_EVENT_LINKUP)) {
					struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
					notify->event = MSM_PCIE_EVENT_LINKUP;
					notify->user = dev->event_reg->user;
					PCIE_DBG(dev,
						"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
						dev->rc_idx);
					dev->event_reg->callback(notify);
				}
			}
		} else {
			/* Single endpoint: notify the one registered client. */
			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
				dev->event_reg && dev->event_reg->callback &&
				(dev->event_reg->events &
				MSM_PCIE_EVENT_LINKUP)) {
				struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
				notify->event = MSM_PCIE_EVENT_LINKUP;
				notify->user = dev->event_reg->user;
				PCIE_DBG(dev,
					"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
					dev->rc_idx);
				dev->event_reg->callback(notify);
			} else {
				PCIE_DBG(dev,
					"PCIe: Client of RC%d does not have registration for linkup event.\n",
					dev->rc_idx);
			}
		}
		goto out;
	} else {
		PCIE_ERR(dev,
			"PCIe: The enumeration for RC%d has already been done.\n",
			dev->rc_idx);
		goto out;
	}

out:
	mutex_unlock(&dev->recovery_lock);
}
5264
/*
 * handle_aer_irq() - ISR for Advanced Error Reporting interrupts.
 * @irq: interrupt number that fired.
 * @data: struct msm_pcie_dev_t of the root complex.
 *
 * Reads and clears the RC's AER status registers, maintains per-severity
 * error counters and, while the link is up, does the same for the
 * endpoint(s) identified by the AER error-source register.
 */
static irqreturn_t handle_aer_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
	int ep_corr_val = 0, ep_uncorr_val = 0;
	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
	u32 ep_dev_ctrlstts_offset = 0;
	int i, j, ep_src_bdf = 0;
	void __iomem *ep_base = NULL;
	unsigned long irqsave_flags;

	PCIE_DBG2(dev,
		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
		dev->rc_idx, irq, dev->rc_corr_counter,
		dev->rc_non_fatal_counter, dev->rc_fatal_counter,
		dev->ep_corr_counter, dev->ep_non_fatal_counter,
		dev->ep_fatal_counter);

	spin_lock_irqsave(&dev->aer_lock, irqsave_flags);

	/* Nothing to service while the RC is going down. */
	if (dev->suspending) {
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently suspending.\n",
			dev->rc_idx);
		spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
		return IRQ_HANDLED;
	}

	/* Snapshot the RC's AER and device control/status registers. */
	uncorr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_UNCORR_ERR_STATUS_REG);
	corr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_CORR_ERR_STATUS_REG);
	rc_err_status = readl_relaxed(dev->dm_core +
				PCIE20_AER_ROOT_ERR_STATUS_REG);
	rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
				PCIE20_CAP_DEVCTRLSTATUS);

	if (uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				uncorr_val);
	/* Correctable-error logging is rate-limited by corr_counter_limit. */
	if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
		PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				corr_val);

	/* Bits 18/17/16: fatal / non-fatal / correctable error detected. */
	if ((rc_dev_ctrlstts >> 18) & 0x1)
		dev->rc_fatal_counter++;
	if ((rc_dev_ctrlstts >> 17) & 0x1)
		dev->rc_non_fatal_counter++;
	if ((rc_dev_ctrlstts >> 16) & 0x1)
		dev->rc_corr_counter++;

	/* Write-1-to-clear the RC's detected-error bits. */
	msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(18)|BIT(17)|BIT(16));

	/* Endpoint registers are unreachable when the link is down. */
	if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
		PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
		goto out;
	}

	/* Pass 0: correctable source BDF; pass 1: uncorrectable source BDF. */
	for (i = 0; i < 2; i++) {
		if (i)
			ep_src_bdf = readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
		else
			ep_src_bdf = (readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;

		if (!ep_src_bdf)
			continue;

		/* Look up the reporting endpoint by BDF in the RC's table. */
		for (j = 0; j < MAX_DEVICE_NUM; j++) {
			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
				PCIE_DBG2(dev,
					"PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
					i ? "Uncorrectable" : "Correctable",
					dev->pcidev_table[j].bdf >> 24,
					dev->pcidev_table[j].bdf >> 19 & 0x1f,
					dev->pcidev_table[j].bdf >> 16 & 0x07);
				ep_base = dev->pcidev_table[j].conf_base;
				ep_dev_ctrlstts_offset = dev->
					pcidev_table[j].dev_ctrlstts_offset;
				break;
			}
		}

		/*
		 * NOTE(review): ep_base is not reset between the two passes,
		 * so a table miss on the second pass reuses the endpoint
		 * found on the first - confirm this is intended.
		 */
		if (!ep_base) {
			PCIE_ERR(dev,
				"PCIe: RC%d no endpoint found for reported error\n",
				dev->rc_idx);
			goto out;
		}

		ep_uncorr_val = readl_relaxed(ep_base +
					PCIE20_AER_UNCORR_ERR_STATUS_REG);
		ep_corr_val = readl_relaxed(ep_base +
					PCIE20_AER_CORR_ERR_STATUS_REG);
		ep_dev_ctrlstts = readl_relaxed(ep_base +
					ep_dev_ctrlstts_offset);

		if (ep_uncorr_val)
			PCIE_DBG(dev,
				"EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				ep_uncorr_val);
		if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
			PCIE_DBG(dev,
				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				ep_corr_val);

		/* Same bit layout as the RC's device status register. */
		if ((ep_dev_ctrlstts >> 18) & 0x1)
			dev->ep_fatal_counter++;
		if ((ep_dev_ctrlstts >> 17) & 0x1)
			dev->ep_non_fatal_counter++;
		if ((ep_dev_ctrlstts >> 16) & 0x1)
			dev->ep_corr_counter++;

		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
					BIT(18)|BIT(17)|BIT(16));

		/* Write-1-to-clear the endpoint's AER status bits. */
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_UNCORR_ERR_STATUS_REG,
				0x3fff031, 0x3fff031);
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_CORR_ERR_STATUS_REG,
				0xf1c1, 0xf1c1);
	}
out:
	if (((dev->rc_corr_counter < corr_counter_limit) &&
		(dev->ep_corr_counter < corr_counter_limit)) ||
		uncorr_val || ep_uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
				rc_err_status);
	/* Clear the RC's AER status so the next error re-triggers the IRQ. */
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_UNCORR_ERR_STATUS_REG,
			0x3fff031, 0x3fff031);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_CORR_ERR_STATUS_REG,
			0xf1c1, 0xf1c1);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_ROOT_ERR_STATUS_REG,
			0x7f, 0x7f);

	spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
	return IRQ_HANDLED;
}
5410
5411static irqreturn_t handle_wake_irq(int irq, void *data)
5412{
5413 struct msm_pcie_dev_t *dev = data;
5414 unsigned long irqsave_flags;
5415 int i;
5416
5417 spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);
5418
5419 dev->wake_counter++;
5420 PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
5421 dev->wake_counter, dev->rc_idx);
5422
5423 PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
5424 dev->rc_idx);
5425
5426 if (!dev->enumerated) {
5427 PCIE_DBG(dev, "Start enumeating RC%d\n", dev->rc_idx);
5428 if (dev->ep_wakeirq)
5429 schedule_work(&dev->handle_wake_work);
5430 else
5431 PCIE_DBG(dev,
5432 "wake irq is received but ep_wakeirq is not supported for RC%d.\n",
5433 dev->rc_idx);
5434 } else {
5435 PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
5436 __pm_stay_awake(&dev->ws);
5437 __pm_relax(&dev->ws);
5438
5439 if (dev->num_ep > 1) {
5440 for (i = 0; i < MAX_DEVICE_NUM; i++) {
5441 dev->event_reg =
5442 dev->pcidev_table[i].event_reg;
5443 msm_pcie_notify_client(dev,
5444 MSM_PCIE_EVENT_WAKEUP);
5445 }
5446 } else {
5447 msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
5448 }
5449 }
5450
5451 spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);
5452
5453 return IRQ_HANDLED;
5454}
5455
/*
 * handle_linkdown_irq() - ISR for the PCIe link-down interrupt.
 * @irq: interrupt number.
 * @data: struct msm_pcie_dev_t of the root complex.
 *
 * Ignores the event if the link is not up or the RC is suspending;
 * otherwise marks the link disabled, asserts PERST and notifies all
 * registered clients with a LINKDOWN event.
 */
static irqreturn_t handle_linkdown_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);

	dev->linkdown_counter++;

	PCIE_DBG(dev,
		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
		dev->linkdown_counter, dev->rc_idx);

	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG(dev,
			"PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
			dev->rc_idx);
	} else if (dev->suspending) {
		PCIE_DBG(dev,
			"PCIe:the link of RC%d is suspending.\n",
			dev->rc_idx);
	} else {
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		/* Register shadow is stale once the link goes down. */
		dev->shadow_en = false;

		/* Optional debug aid: crash immediately on linkdown. */
		if (dev->linkdown_panic)
			panic("User has chosen to panic on linkdown\n");

		/* assert PERST */
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

		if (dev->num_ep > 1) {
			/* Notify every endpoint client on a multi-EP RC. */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_LINKDOWN);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
		}
	}

	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);

	return IRQ_HANDLED;
}
5506
/*
 * handle_msi_irq() - ISR for the physical MSI interrupt line.
 * @irq: interrupt number.
 * @data: struct msm_pcie_dev_t of the root complex.
 *
 * Scans each MSI controller status register, acks every pending vector
 * by writing its bit back, and dispatches the corresponding virtual IRQ
 * from the RC's MSI irq_domain.
 */
static irqreturn_t handle_msi_irq(int irq, void *data)
{
	int i, j;
	unsigned long val;
	struct msm_pcie_dev_t *dev = data;
	void __iomem *ctrl_status;

	PCIE_DUMP(dev, "irq: %d\n", irq);

	/*
	 * check for set bits, clear it by setting that bit
	 * and trigger corresponding irq
	 */
	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
		/* Each controller's registers are 12 bytes apart. */
		ctrl_status = dev->dm_core +
				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);

		/* Re-read after each ack so vectors raised meanwhile
		 * in the same register are serviced before returning.
		 */
		val = readl_relaxed(ctrl_status);
		while (val) {
			j = find_first_bit(&val, 32);
			writel_relaxed(BIT(j), ctrl_status);
			/* ensure that interrupt is cleared (acked) */
			wmb();
			generic_handle_irq(
			   irq_find_mapping(dev->irq_domain, (j + (32*i)))
			   );
			val = readl_relaxed(ctrl_status);
		}
	}

	return IRQ_HANDLED;
}
5539
/*
 * handle_global_irq() - ISR for the PARF global interrupt.
 * @irq: interrupt number.
 * @data: struct msm_pcie_dev_t of the root complex.
 *
 * Reads the masked PARF interrupt status, clears it, then demultiplexes
 * each pending event bit to the linkdown or AER handlers.
 */
static irqreturn_t handle_global_irq(int irq, void *data)
{
	int i;
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	u32 status = 0;

	spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);

	/* Only consider events that are not masked off. */
	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);

	/* Ack everything we are about to service. */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);

	PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
		dev->rc_idx, irq, status);

	for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
		if (status & BIT(i)) {
			switch (i) {
			case MSM_PCIE_INT_EVT_LINK_DOWN:
				PCIE_DBG(dev,
					"PCIe: RC%d: handle linkdown event.\n",
					dev->rc_idx);
				handle_linkdown_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_LEGACY:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER legacy event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_ERR:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			default:
				PCIE_ERR(dev,
					"PCIe: RC%d: Unexpected event %d is caught!\n",
					dev->rc_idx, i);
			}
		}
	}

	spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);

	return IRQ_HANDLED;
}
5590
Tony Truong52122a62017-03-23 18:00:34 -07005591static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
5592 struct pci_dev *pdev)
5593{
5594 struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
5595 int bypass_en = 0;
5596
5597 if (!domain) {
5598 PCIE_DBG(dev,
5599 "PCIe: RC%d: client does not have an iommu domain\n",
5600 dev->rc_idx);
5601 return;
5602 }
5603
5604 iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
5605 if (!bypass_en) {
5606 int ret;
5607 phys_addr_t pcie_base_addr =
5608 dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
5609 dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);
5610
5611 ret = iommu_unmap(domain, iova, PAGE_SIZE);
5612 if (ret != PAGE_SIZE)
5613 PCIE_ERR(dev,
5614 "PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
5615 dev->rc_idx, ret);
5616 }
5617}
5618
Tony Truongc3c52ae2017-03-29 12:16:51 -07005619void msm_pcie_destroy_irq(unsigned int irq)
Tony Truong349ee492014-10-01 17:35:56 -07005620{
Tony Truongc3c52ae2017-03-29 12:16:51 -07005621 int pos;
5622 struct pci_dev *pdev = irq_get_chip_data(irq);
5623 struct msi_desc *entry = irq_get_msi_desc(irq);
5624 struct msi_desc *firstentry;
Tony Truong349ee492014-10-01 17:35:56 -07005625 struct msm_pcie_dev_t *dev;
Tony Truongc3c52ae2017-03-29 12:16:51 -07005626 u32 nvec;
5627 int firstirq;
Tony Truong349ee492014-10-01 17:35:56 -07005628
Tony Truongc3c52ae2017-03-29 12:16:51 -07005629 if (!pdev) {
5630 pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
Tony Truong349ee492014-10-01 17:35:56 -07005631 return;
5632 }
5633
Tony Truongc3c52ae2017-03-29 12:16:51 -07005634 dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5635 if (!dev) {
5636 pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
5637 return;
5638 }
5639
5640 if (!entry) {
5641 PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
5642 dev->rc_idx, irq);
5643 return;
5644 }
5645
5646 firstentry = first_pci_msi_entry(pdev);
5647 if (!firstentry) {
5648 PCIE_ERR(dev,
5649 "PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
5650 dev->rc_idx, irq);
5651 return;
5652 }
5653
5654 firstirq = firstentry->irq;
5655 nvec = (1 << entry->msi_attrib.multiple);
5656
Tony Truong349ee492014-10-01 17:35:56 -07005657 if (dev->msi_gicm_addr) {
5658 PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
5659
Tony Truongc3c52ae2017-03-29 12:16:51 -07005660 if (irq < firstirq || irq > firstirq + nvec - 1) {
Tony Truong349ee492014-10-01 17:35:56 -07005661 PCIE_ERR(dev,
5662 "Could not find irq: %d in RC%d MSI table\n",
5663 irq, dev->rc_idx);
5664 return;
5665 }
Tony Truong52122a62017-03-23 18:00:34 -07005666 if (irq == firstirq + nvec - 1)
5667 msm_pcie_unmap_qgic_addr(dev, pdev);
Tony Truongc3c52ae2017-03-29 12:16:51 -07005668 pos = irq - firstirq;
Tony Truong349ee492014-10-01 17:35:56 -07005669 } else {
5670 PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
5671 pos = irq - irq_find_mapping(dev->irq_domain, 0);
5672 }
5673
5674 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5675
5676 PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
5677 pos, *dev->msi_irq_in_use);
5678 clear_bit(pos, dev->msi_irq_in_use);
5679 PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
5680 pos, *dev->msi_irq_in_use);
5681}
5682
5683/* hookup to linux pci msi framework */
/* Arch hook called by the PCI MSI core to free a single vector. */
void arch_teardown_msi_irq(unsigned int irq)
{
	PCIE_GEN_DBG("irq %d deallocated\n", irq);
	msm_pcie_destroy_irq(irq);
}
5689
5690void arch_teardown_msi_irqs(struct pci_dev *dev)
5691{
5692 struct msi_desc *entry;
5693 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5694
5695 PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
5696 pcie_dev->rc_idx, dev->vendor, dev->device);
5697
5698 pcie_dev->use_msi = false;
5699
5700 list_for_each_entry(entry, &dev->dev.msi_list, list) {
5701 int i, nvec;
5702
5703 if (entry->irq == 0)
5704 continue;
5705 nvec = 1 << entry->msi_attrib.multiple;
5706 for (i = 0; i < nvec; i++)
Tony Truongc3c52ae2017-03-29 12:16:51 -07005707 arch_teardown_msi_irq(entry->irq + i);
Tony Truong349ee492014-10-01 17:35:56 -07005708 }
5709}
5710
/* No-op ack: MSI status bits are already cleared in handle_msi_irq(). */
static void msm_pcie_msi_nop(struct irq_data *d)
{
}
5714
/* irq_chip for virtual MSI IRQs mapped through the RC's irq_domain. */
static struct irq_chip pcie_msi_chip = {
	.name = "msm-pcie-msi",
	.irq_ack = msm_pcie_msi_nop,
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
5723
5724static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
5725{
5726 int irq, pos;
5727
5728 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5729
5730again:
5731 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5732
5733 if (pos >= PCIE_MSI_NR_IRQS)
5734 return -ENOSPC;
5735
5736 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5737
5738 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5739 goto again;
5740 else
5741 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5742
5743 irq = irq_create_mapping(dev->irq_domain, pos);
5744 if (!irq)
5745 return -EINVAL;
5746
5747 return irq;
5748}
5749
/*
 * arch_setup_msi_irq_default() - set up one irq-domain-based MSI vector.
 * @pdev: endpoint requesting MSI.
 * @desc: MSI descriptor for the vector.
 * @nvec: requested vector count (unused here - the default scheme
 *        programs a single message; assumes callers handle multi-vector
 *        via repeated data offsets - TODO confirm).
 *
 * Allocates a vector, binds it to @pdev/@desc, and programs the MSI
 * message to target the RC's fixed MSI capture address.
 */
static int arch_setup_msi_irq_default(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	irq = msm_pcie_create_irq(dev);

	PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);

	if (irq < 0)
		return irq;

	PCIE_DBG(dev, "irq %d allocated\n", irq);

	/* chip data is used by msm_pcie_destroy_irq() for teardown. */
	irq_set_chip_data(irq, pdev);
	irq_set_msi_desc(irq, desc);

	/* write msi vector and data */
	msg.address_hi = 0;
	msg.address_lo = MSM_PCIE_MSI_PHY;
	/* data = vector's position within the RC's irq_domain. */
	msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
	write_msi_msg(irq, &msg);

	return 0;
}
5779
5780static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
5781{
5782 int irq, pos;
5783
5784 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5785
5786again:
5787 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5788
5789 if (pos >= PCIE_MSI_NR_IRQS)
5790 return -ENOSPC;
5791
5792 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5793
5794 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5795 goto again;
5796 else
5797 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5798
5799 if (pos >= MSM_PCIE_MAX_MSI) {
5800 PCIE_ERR(dev,
5801 "PCIe: RC%d: pos %d is not less than %d\n",
5802 dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
5803 return MSM_PCIE_ERROR;
5804 }
5805
5806 irq = dev->msi[pos].num;
5807 if (!irq) {
5808 PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
5809 dev->rc_idx);
5810 return -EINVAL;
5811 }
5812
5813 return irq;
5814}
5815
Tony Truong52122a62017-03-23 18:00:34 -07005816static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
5817 struct pci_dev *pdev,
5818 struct msi_msg *msg)
5819{
5820 struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
5821 int ret, bypass_en = 0;
5822 dma_addr_t iova;
5823 phys_addr_t pcie_base_addr, gicm_db_offset;
5824
5825 msg->address_hi = 0;
5826 msg->address_lo = dev->msi_gicm_addr;
5827
5828 if (!domain) {
5829 PCIE_DBG(dev,
5830 "PCIe: RC%d: client does not have an iommu domain\n",
5831 dev->rc_idx);
5832 return 0;
5833 }
5834
5835 iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
5836
5837 PCIE_DBG(dev,
5838 "PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n",
5839 dev->rc_idx, bypass_en ? "bypass" : "enabled",
5840 pdev->bus->number, pdev->devfn);
5841
5842 if (bypass_en)
5843 return 0;
5844
5845 gicm_db_offset = dev->msi_gicm_addr -
5846 rounddown(dev->msi_gicm_addr, PAGE_SIZE);
5847 /*
5848 * Use PCIe DBI address as the IOVA since client cannot
5849 * use this address for their IOMMU mapping. This will
5850 * prevent any conflicts between PCIe host and
5851 * client's mapping.
5852 */
5853 pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
5854 iova = rounddown(pcie_base_addr, PAGE_SIZE);
5855
5856 ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
5857 PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
5858 if (ret < 0) {
5859 PCIE_ERR(dev,
5860 "PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
5861 dev->rc_idx, ret);
5862 return -ENOMEM;
5863 }
5864
5865 msg->address_lo = iova + gicm_db_offset;
5866
5867 return 0;
5868}
5869
Tony Truong349ee492014-10-01 17:35:56 -07005870static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
5871 struct msi_desc *desc, int nvec)
5872{
Tony Truong52122a62017-03-23 18:00:34 -07005873 int irq, index, ret, firstirq = 0;
Tony Truong349ee492014-10-01 17:35:56 -07005874 struct msi_msg msg;
5875 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5876
5877 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5878
5879 for (index = 0; index < nvec; index++) {
5880 irq = msm_pcie_create_irq_qgic(dev);
5881 PCIE_DBG(dev, "irq %d is allocated\n", irq);
5882
5883 if (irq < 0)
5884 return irq;
5885
5886 if (index == 0)
5887 firstirq = irq;
5888
5889 irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
Tony Truongc3c52ae2017-03-29 12:16:51 -07005890 irq_set_chip_data(irq, pdev);
Tony Truong349ee492014-10-01 17:35:56 -07005891 }
5892
5893 /* write msi vector and data */
5894 irq_set_msi_desc(firstirq, desc);
Tony Truong52122a62017-03-23 18:00:34 -07005895
5896 ret = msm_pcie_map_qgic_addr(dev, pdev, &msg);
5897 if (ret)
5898 return ret;
5899
Tony Truong349ee492014-10-01 17:35:56 -07005900 msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
5901 write_msi_msg(firstirq, &msg);
5902
5903 return 0;
5904}
5905
5906int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
5907{
5908 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5909
5910 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5911
5912 if (dev->msi_gicm_addr)
5913 return arch_setup_msi_irq_qgic(pdev, desc, 1);
5914 else
5915 return arch_setup_msi_irq_default(pdev, desc, 1);
5916}
5917
/*
 * Compute log2 of the requested MSI vector count (the value stored in
 * msi_attrib.multiple).  Matches the original: for a power-of-two nvec
 * this is the exact log2; returns -1 when nvec is 0.
 */
static int msm_pcie_get_msi_multiple(int nvec)
{
	int log2_nvec = -1;

	for (; nvec; nvec >>= 1)
		log2_nvec++;

	PCIE_GEN_DBG("log2 number of MSI multiple:%d\n",
		log2_nvec);

	return log2_nvec;
}
5931
/*
 * Arch hook for multi-vector MSI setup.
 * @dev: endpoint requesting MSI.
 * @nvec: number of vectors requested (at most 32).
 * @type: capability type; only PCI_CAP_ID_MSI is supported (not MSI-X).
 *
 * Records the log2 vector count in each descriptor and sets up the
 * vectors through the QGIC or default path.  Returns 0 on success,
 * -ENOSPC when the request cannot be satisfied, or a negative errno.
 */
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);

	if (type != PCI_CAP_ID_MSI || nvec > 32)
		return -ENOSPC;

	PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);

	list_for_each_entry(entry, &dev->dev.msi_list, list) {
		entry->msi_attrib.multiple =
				msm_pcie_get_msi_multiple(nvec);

		if (pcie_dev->msi_gicm_addr)
			ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
		else
			ret = arch_setup_msi_irq_default(dev, entry, nvec);

		PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);

		if (ret < 0)
			return ret;
		/* Positive return (e.g. MSM_PCIE_ERROR) means no space. */
		if (ret > 0)
			return -ENOSPC;
	}

	pcie_dev->use_msi = true;

	return 0;
}
5966
5967static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
5968 irq_hw_number_t hwirq)
5969{
5970 irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
Tony Truong349ee492014-10-01 17:35:56 -07005971 return 0;
5972}
5973
/* irq_domain ops for the default (non-QGIC) MSI domain. */
static const struct irq_domain_ops msm_pcie_msi_ops = {
	.map = msm_pcie_msi_map,
};
5977
5978int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
5979{
5980 int rc;
5981 int msi_start = 0;
5982 struct device *pdev = &dev->pdev->dev;
5983
5984 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5985
5986 if (dev->rc_idx)
5987 wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
5988 else
5989 wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
5990
5991 /* register handler for linkdown interrupt */
5992 if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
5993 rc = devm_request_irq(pdev,
5994 dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
5995 handle_linkdown_irq,
5996 IRQF_TRIGGER_RISING,
5997 dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
5998 dev);
5999 if (rc) {
6000 PCIE_ERR(dev,
6001 "PCIe: Unable to request linkdown interrupt:%d\n",
6002 dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
6003 return rc;
6004 }
6005 }
6006
6007 /* register handler for physical MSI interrupt line */
6008 if (dev->irq[MSM_PCIE_INT_MSI].num) {
6009 rc = devm_request_irq(pdev,
6010 dev->irq[MSM_PCIE_INT_MSI].num,
6011 handle_msi_irq,
6012 IRQF_TRIGGER_RISING,
6013 dev->irq[MSM_PCIE_INT_MSI].name,
6014 dev);
6015 if (rc) {
6016 PCIE_ERR(dev,
6017 "PCIe: RC%d: Unable to request MSI interrupt\n",
6018 dev->rc_idx);
6019 return rc;
6020 }
6021 }
6022
6023 /* register handler for AER interrupt */
6024 if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
6025 rc = devm_request_irq(pdev,
6026 dev->irq[MSM_PCIE_INT_PLS_ERR].num,
6027 handle_aer_irq,
6028 IRQF_TRIGGER_RISING,
6029 dev->irq[MSM_PCIE_INT_PLS_ERR].name,
6030 dev);
6031 if (rc) {
6032 PCIE_ERR(dev,
6033 "PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
6034 dev->rc_idx,
6035 dev->irq[MSM_PCIE_INT_PLS_ERR].num);
6036 return rc;
6037 }
6038 }
6039
6040 /* register handler for AER legacy interrupt */
6041 if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
6042 rc = devm_request_irq(pdev,
6043 dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
6044 handle_aer_irq,
6045 IRQF_TRIGGER_RISING,
6046 dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
6047 dev);
6048 if (rc) {
6049 PCIE_ERR(dev,
6050 "PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
6051 dev->rc_idx,
6052 dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
6053 return rc;
6054 }
6055 }
6056
6057 if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
6058 rc = devm_request_irq(pdev,
6059 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
6060 handle_global_irq,
6061 IRQF_TRIGGER_RISING,
6062 dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
6063 dev);
6064 if (rc) {
6065 PCIE_ERR(dev,
6066 "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
6067 dev->rc_idx,
6068 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
6069 return rc;
6070 }
6071 }
6072
6073 /* register handler for PCIE_WAKE_N interrupt line */
6074 if (dev->wake_n) {
6075 rc = devm_request_irq(pdev,
6076 dev->wake_n, handle_wake_irq,
6077 IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
6078 if (rc) {
6079 PCIE_ERR(dev,
6080 "PCIe: RC%d: Unable to request wake interrupt\n",
6081 dev->rc_idx);
6082 return rc;
6083 }
6084
6085 INIT_WORK(&dev->handle_wake_work, handle_wake_func);
6086
6087 rc = enable_irq_wake(dev->wake_n);
6088 if (rc) {
6089 PCIE_ERR(dev,
6090 "PCIe: RC%d: Unable to enable wake interrupt\n",
6091 dev->rc_idx);
6092 return rc;
6093 }
6094 }
6095
6096 /* Create a virtual domain of interrupts */
6097 if (!dev->msi_gicm_addr) {
6098 dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
6099 PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
6100
6101 if (!dev->irq_domain) {
6102 PCIE_ERR(dev,
6103 "PCIe: RC%d: Unable to initialize irq domain\n",
6104 dev->rc_idx);
6105
6106 if (dev->wake_n)
6107 disable_irq(dev->wake_n);
6108
6109 return PTR_ERR(dev->irq_domain);
6110 }
6111
6112 msi_start = irq_create_mapping(dev->irq_domain, 0);
6113 }
6114
6115 return 0;
6116}
6117
/*
 * msm_pcie_irq_deinit() - undo msm_pcie_irq_init(): drop the wakeup
 * source and quiesce the WAKE# line.  IRQ handlers themselves are
 * devm-managed and freed with the platform device.
 */
void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	wakeup_source_trash(&dev->ws);

	if (dev->wake_n)
		disable_irq(dev->wake_n);
}
6127
6128
6129static int msm_pcie_probe(struct platform_device *pdev)
6130{
6131 int ret = 0;
6132 int rc_idx = -1;
6133 int i, j;
6134
6135 PCIE_GEN_DBG("%s\n", __func__);
6136
6137 mutex_lock(&pcie_drv.drv_lock);
6138
6139 ret = of_property_read_u32((&pdev->dev)->of_node,
6140 "cell-index", &rc_idx);
6141 if (ret) {
6142 PCIE_GEN_DBG("Did not find RC index.\n");
6143 goto out;
6144 } else {
6145 if (rc_idx >= MAX_RC_NUM) {
6146 pr_err(
6147 "PCIe: Invalid RC Index %d (max supported = %d)\n",
6148 rc_idx, MAX_RC_NUM);
6149 goto out;
6150 }
6151 pcie_drv.rc_num++;
6152 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
6153 rc_idx);
6154 }
6155
6156 msm_pcie_dev[rc_idx].l0s_supported =
6157 of_property_read_bool((&pdev->dev)->of_node,
6158 "qcom,l0s-supported");
6159 PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
6160 msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
6161 msm_pcie_dev[rc_idx].l1_supported =
6162 of_property_read_bool((&pdev->dev)->of_node,
6163 "qcom,l1-supported");
6164 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
6165 msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
6166 msm_pcie_dev[rc_idx].l1ss_supported =
6167 of_property_read_bool((&pdev->dev)->of_node,
6168 "qcom,l1ss-supported");
6169 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
6170 msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
6171 msm_pcie_dev[rc_idx].common_clk_en =
6172 of_property_read_bool((&pdev->dev)->of_node,
6173 "qcom,common-clk-en");
6174 PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
6175 msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
6176 msm_pcie_dev[rc_idx].clk_power_manage_en =
6177 of_property_read_bool((&pdev->dev)->of_node,
6178 "qcom,clk-power-manage-en");
6179 PCIE_DBG(&msm_pcie_dev[rc_idx],
6180 "Clock power management is %s enabled.\n",
6181 msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
6182 msm_pcie_dev[rc_idx].aux_clk_sync =
6183 of_property_read_bool((&pdev->dev)->of_node,
6184 "qcom,aux-clk-sync");
6185 PCIE_DBG(&msm_pcie_dev[rc_idx],
6186 "AUX clock is %s synchronous to Core clock.\n",
6187 msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
6188
6189 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
6190 of_property_read_bool((&pdev->dev)->of_node,
6191 "qcom,use-19p2mhz-aux-clk");
6192 PCIE_DBG(&msm_pcie_dev[rc_idx],
6193 "AUX clock frequency is %s 19.2MHz.\n",
6194 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
6195
6196 msm_pcie_dev[rc_idx].smmu_exist =
6197 of_property_read_bool((&pdev->dev)->of_node,
6198 "qcom,smmu-exist");
6199 PCIE_DBG(&msm_pcie_dev[rc_idx],
6200 "SMMU does %s exist.\n",
6201 msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
6202
6203 msm_pcie_dev[rc_idx].smmu_sid_base = 0;
6204 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
6205 &msm_pcie_dev[rc_idx].smmu_sid_base);
6206 if (ret)
6207 PCIE_DBG(&msm_pcie_dev[rc_idx],
6208 "RC%d SMMU sid base not found\n",
6209 msm_pcie_dev[rc_idx].rc_idx);
6210 else
6211 PCIE_DBG(&msm_pcie_dev[rc_idx],
6212 "RC%d: qcom,smmu-sid-base: 0x%x.\n",
6213 msm_pcie_dev[rc_idx].rc_idx,
6214 msm_pcie_dev[rc_idx].smmu_sid_base);
6215
6216 msm_pcie_dev[rc_idx].ep_wakeirq =
6217 of_property_read_bool((&pdev->dev)->of_node,
6218 "qcom,ep-wakeirq");
6219 PCIE_DBG(&msm_pcie_dev[rc_idx],
6220 "PCIe: EP of RC%d does %s assert wake when it is up.\n",
6221 rc_idx, msm_pcie_dev[rc_idx].ep_wakeirq ? "" : "not");
6222
6223 msm_pcie_dev[rc_idx].phy_ver = 1;
6224 ret = of_property_read_u32((&pdev->dev)->of_node,
6225 "qcom,pcie-phy-ver",
6226 &msm_pcie_dev[rc_idx].phy_ver);
6227 if (ret)
6228 PCIE_DBG(&msm_pcie_dev[rc_idx],
6229 "RC%d: pcie-phy-ver does not exist.\n",
6230 msm_pcie_dev[rc_idx].rc_idx);
6231 else
6232 PCIE_DBG(&msm_pcie_dev[rc_idx],
6233 "RC%d: pcie-phy-ver: %d.\n",
6234 msm_pcie_dev[rc_idx].rc_idx,
6235 msm_pcie_dev[rc_idx].phy_ver);
6236
6237 msm_pcie_dev[rc_idx].n_fts = 0;
6238 ret = of_property_read_u32((&pdev->dev)->of_node,
6239 "qcom,n-fts",
6240 &msm_pcie_dev[rc_idx].n_fts);
6241
6242 if (ret)
6243 PCIE_DBG(&msm_pcie_dev[rc_idx],
6244 "n-fts does not exist. ret=%d\n", ret);
6245 else
6246 PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
6247 msm_pcie_dev[rc_idx].n_fts);
6248
6249 msm_pcie_dev[rc_idx].common_phy =
6250 of_property_read_bool((&pdev->dev)->of_node,
6251 "qcom,common-phy");
6252 PCIE_DBG(&msm_pcie_dev[rc_idx],
6253 "PCIe: RC%d: Common PHY does %s exist.\n",
6254 rc_idx, msm_pcie_dev[rc_idx].common_phy ? "" : "not");
6255
6256 msm_pcie_dev[rc_idx].ext_ref_clk =
6257 of_property_read_bool((&pdev->dev)->of_node,
6258 "qcom,ext-ref-clk");
6259 PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
6260 msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
6261
6262 msm_pcie_dev[rc_idx].ep_latency = 0;
6263 ret = of_property_read_u32((&pdev->dev)->of_node,
6264 "qcom,ep-latency",
6265 &msm_pcie_dev[rc_idx].ep_latency);
6266 if (ret)
6267 PCIE_DBG(&msm_pcie_dev[rc_idx],
6268 "RC%d: ep-latency does not exist.\n",
6269 rc_idx);
6270 else
6271 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
6272 rc_idx, msm_pcie_dev[rc_idx].ep_latency);
6273
6274 msm_pcie_dev[rc_idx].wr_halt_size = 0;
6275 ret = of_property_read_u32(pdev->dev.of_node,
6276 "qcom,wr-halt-size",
6277 &msm_pcie_dev[rc_idx].wr_halt_size);
6278 if (ret)
6279 PCIE_DBG(&msm_pcie_dev[rc_idx],
6280 "RC%d: wr-halt-size not specified in dt. Use default value.\n",
6281 rc_idx);
6282 else
6283 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
6284 rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
6285
6286 msm_pcie_dev[rc_idx].cpl_timeout = 0;
6287 ret = of_property_read_u32((&pdev->dev)->of_node,
6288 "qcom,cpl-timeout",
6289 &msm_pcie_dev[rc_idx].cpl_timeout);
6290 if (ret)
6291 PCIE_DBG(&msm_pcie_dev[rc_idx],
6292 "RC%d: Using default cpl-timeout.\n",
6293 rc_idx);
6294 else
6295 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
6296 rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
6297
6298 msm_pcie_dev[rc_idx].perst_delay_us_min =
6299 PERST_PROPAGATION_DELAY_US_MIN;
6300 ret = of_property_read_u32(pdev->dev.of_node,
6301 "qcom,perst-delay-us-min",
6302 &msm_pcie_dev[rc_idx].perst_delay_us_min);
6303 if (ret)
6304 PCIE_DBG(&msm_pcie_dev[rc_idx],
6305 "RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
6306 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
6307 else
6308 PCIE_DBG(&msm_pcie_dev[rc_idx],
6309 "RC%d: perst-delay-us-min: %dus.\n",
6310 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
6311
6312 msm_pcie_dev[rc_idx].perst_delay_us_max =
6313 PERST_PROPAGATION_DELAY_US_MAX;
6314 ret = of_property_read_u32(pdev->dev.of_node,
6315 "qcom,perst-delay-us-max",
6316 &msm_pcie_dev[rc_idx].perst_delay_us_max);
6317 if (ret)
6318 PCIE_DBG(&msm_pcie_dev[rc_idx],
6319 "RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
6320 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
6321 else
6322 PCIE_DBG(&msm_pcie_dev[rc_idx],
6323 "RC%d: perst-delay-us-max: %dus.\n",
6324 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
6325
6326 msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
6327 ret = of_property_read_u32(pdev->dev.of_node,
6328 "qcom,tlp-rd-size",
6329 &msm_pcie_dev[rc_idx].tlp_rd_size);
6330 if (ret)
6331 PCIE_DBG(&msm_pcie_dev[rc_idx],
6332 "RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
6333 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
6334 else
6335 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
6336 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
6337
6338 msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
6339 msm_pcie_dev[rc_idx].msi_gicm_base = 0;
6340 ret = of_property_read_u32((&pdev->dev)->of_node,
6341 "qcom,msi-gicm-addr",
6342 &msm_pcie_dev[rc_idx].msi_gicm_addr);
6343
6344 if (ret) {
6345 PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
6346 "msi-gicm-addr does not exist.\n");
6347 } else {
6348 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
6349 msm_pcie_dev[rc_idx].msi_gicm_addr);
6350
6351 ret = of_property_read_u32((&pdev->dev)->of_node,
6352 "qcom,msi-gicm-base",
6353 &msm_pcie_dev[rc_idx].msi_gicm_base);
6354
6355 if (ret) {
6356 PCIE_ERR(&msm_pcie_dev[rc_idx],
6357 "PCIe: RC%d: msi-gicm-base does not exist.\n",
6358 rc_idx);
6359 goto decrease_rc_num;
6360 } else {
6361 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
6362 msm_pcie_dev[rc_idx].msi_gicm_base);
6363 }
6364 }
6365
6366 msm_pcie_dev[rc_idx].scm_dev_id = 0;
6367 ret = of_property_read_u32((&pdev->dev)->of_node,
6368 "qcom,scm-dev-id",
6369 &msm_pcie_dev[rc_idx].scm_dev_id);
6370
6371 msm_pcie_dev[rc_idx].rc_idx = rc_idx;
6372 msm_pcie_dev[rc_idx].pdev = pdev;
6373 msm_pcie_dev[rc_idx].vreg_n = 0;
6374 msm_pcie_dev[rc_idx].gpio_n = 0;
6375 msm_pcie_dev[rc_idx].parf_deemph = 0;
6376 msm_pcie_dev[rc_idx].parf_swing = 0;
6377 msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
6378 msm_pcie_dev[rc_idx].user_suspend = false;
6379 msm_pcie_dev[rc_idx].disable_pc = false;
6380 msm_pcie_dev[rc_idx].saved_state = NULL;
6381 msm_pcie_dev[rc_idx].enumerated = false;
6382 msm_pcie_dev[rc_idx].num_active_ep = 0;
6383 msm_pcie_dev[rc_idx].num_ep = 0;
6384 msm_pcie_dev[rc_idx].pending_ep_reg = false;
6385 msm_pcie_dev[rc_idx].phy_len = 0;
6386 msm_pcie_dev[rc_idx].port_phy_len = 0;
6387 msm_pcie_dev[rc_idx].phy_sequence = NULL;
6388 msm_pcie_dev[rc_idx].port_phy_sequence = NULL;
6389 msm_pcie_dev[rc_idx].event_reg = NULL;
6390 msm_pcie_dev[rc_idx].linkdown_counter = 0;
6391 msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
6392 msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
6393 msm_pcie_dev[rc_idx].rc_corr_counter = 0;
6394 msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
6395 msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
6396 msm_pcie_dev[rc_idx].ep_corr_counter = 0;
6397 msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
6398 msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
6399 msm_pcie_dev[rc_idx].suspending = false;
6400 msm_pcie_dev[rc_idx].wake_counter = 0;
6401 msm_pcie_dev[rc_idx].aer_enable = true;
6402 msm_pcie_dev[rc_idx].power_on = false;
6403 msm_pcie_dev[rc_idx].current_short_bdf = 0;
6404 msm_pcie_dev[rc_idx].use_msi = false;
6405 msm_pcie_dev[rc_idx].use_pinctrl = false;
6406 msm_pcie_dev[rc_idx].linkdown_panic = false;
6407 msm_pcie_dev[rc_idx].bridge_found = false;
6408 memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
6409 sizeof(msm_pcie_vreg_info));
6410 memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
6411 sizeof(msm_pcie_gpio_info));
6412 memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
6413 sizeof(msm_pcie_clk_info[rc_idx]));
6414 memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
6415 sizeof(msm_pcie_pipe_clk_info[rc_idx]));
6416 memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
6417 sizeof(msm_pcie_res_info));
6418 memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
6419 sizeof(msm_pcie_irq_info));
6420 memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
6421 sizeof(msm_pcie_msi_info));
6422 memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
6423 sizeof(msm_pcie_reset_info[rc_idx]));
6424 memcpy(msm_pcie_dev[rc_idx].pipe_reset,
6425 msm_pcie_pipe_reset_info[rc_idx],
6426 sizeof(msm_pcie_pipe_reset_info[rc_idx]));
6427 msm_pcie_dev[rc_idx].shadow_en = true;
6428 for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
6429 msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
6430 for (i = 0; i < MAX_DEVICE_NUM; i++)
6431 for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
6432 msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
6433 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6434 msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
6435 msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
6436 msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
6437 msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
6438 msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
6439 msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = 0;
6440 msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
6441 msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
6442 msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
6443 msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
6444 }
6445
Tony Truongbd9a3412017-02-27 18:30:13 -08006446 dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
6447 msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
6448
Tony Truong349ee492014-10-01 17:35:56 -07006449 ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
6450 msm_pcie_dev[rc_idx].pdev);
6451
6452 if (ret)
6453 goto decrease_rc_num;
6454
6455 msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
6456 if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
6457 PCIE_ERR(&msm_pcie_dev[rc_idx],
6458 "PCIe: RC%d failed to get pinctrl\n",
6459 rc_idx);
6460 else
6461 msm_pcie_dev[rc_idx].use_pinctrl = true;
6462
6463 if (msm_pcie_dev[rc_idx].use_pinctrl) {
6464 msm_pcie_dev[rc_idx].pins_default =
6465 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6466 "default");
6467 if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
6468 PCIE_ERR(&msm_pcie_dev[rc_idx],
6469 "PCIe: RC%d could not get pinctrl default state\n",
6470 rc_idx);
6471 msm_pcie_dev[rc_idx].pins_default = NULL;
6472 }
6473
6474 msm_pcie_dev[rc_idx].pins_sleep =
6475 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6476 "sleep");
6477 if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
6478 PCIE_ERR(&msm_pcie_dev[rc_idx],
6479 "PCIe: RC%d could not get pinctrl sleep state\n",
6480 rc_idx);
6481 msm_pcie_dev[rc_idx].pins_sleep = NULL;
6482 }
6483 }
6484
6485 ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
6486 if (ret) {
6487 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6488 goto decrease_rc_num;
6489 }
6490
6491 ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
6492 if (ret) {
6493 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6494 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6495 goto decrease_rc_num;
6496 }
6497
6498 msm_pcie_dev[rc_idx].drv_ready = true;
6499
6500 if (msm_pcie_dev[rc_idx].ep_wakeirq) {
6501 PCIE_DBG(&msm_pcie_dev[rc_idx],
6502 "PCIe: RC%d will be enumerated upon WAKE signal from Endpoint.\n",
6503 rc_idx);
6504 mutex_unlock(&pcie_drv.drv_lock);
6505 return 0;
6506 }
6507
6508 ret = msm_pcie_enumerate(rc_idx);
6509
6510 if (ret)
6511 PCIE_ERR(&msm_pcie_dev[rc_idx],
6512 "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
6513 rc_idx);
6514 else
6515 PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n",
6516 rc_idx);
6517
6518 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n",
6519 dev_name(&(pdev->dev)));
6520
6521 mutex_unlock(&pcie_drv.drv_lock);
6522 return 0;
6523
6524decrease_rc_num:
6525 pcie_drv.rc_num--;
6526out:
6527 if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
6528 pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
6529 rc_idx);
6530 else
6531 PCIE_ERR(&msm_pcie_dev[rc_idx],
6532 "PCIe: Driver probe failed for RC%d:%d\n",
6533 rc_idx, ret);
6534
6535 mutex_unlock(&pcie_drv.drv_lock);
6536
6537 return ret;
6538}
6539
6540static int msm_pcie_remove(struct platform_device *pdev)
6541{
6542 int ret = 0;
6543 int rc_idx;
6544
6545 PCIE_GEN_DBG("PCIe:%s.\n", __func__);
6546
6547 mutex_lock(&pcie_drv.drv_lock);
6548
6549 ret = of_property_read_u32((&pdev->dev)->of_node,
6550 "cell-index", &rc_idx);
6551 if (ret) {
6552 pr_err("%s: Did not find RC index.\n", __func__);
6553 goto out;
6554 } else {
6555 pcie_drv.rc_num--;
6556 PCIE_GEN_DBG("%s: RC index is 0x%x.", __func__, rc_idx);
6557 }
6558
6559 msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
6560 msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
6561 msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
6562 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6563 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6564
6565out:
6566 mutex_unlock(&pcie_drv.drv_lock);
6567
6568 return ret;
6569}
6570
/* Device-tree match table: binds this driver to "qcom,pci-msm" nodes. */
static const struct of_device_id msm_pcie_match[] = {
	{	.compatible = "qcom,pci-msm",
	},
	{}
};
6576
/* Platform driver definition; probe/remove are defined above. */
static struct platform_driver msm_pcie_driver = {
	.probe	= msm_pcie_probe,
	.remove	= msm_pcie_remove,
	.driver	= {
		.name		= "pci-msm",
		.owner		= THIS_MODULE,
		.of_match_table	= msm_pcie_match,
	},
};
6586
6587int __init pcie_init(void)
6588{
6589 int ret = 0, i;
6590 char rc_name[MAX_RC_NAME_LEN];
6591
6592 pr_alert("pcie:%s.\n", __func__);
6593
6594 pcie_drv.rc_num = 0;
6595 mutex_init(&pcie_drv.drv_lock);
6596 mutex_init(&com_phy_lock);
6597
6598 for (i = 0; i < MAX_RC_NUM; i++) {
6599 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
6600 msm_pcie_dev[i].ipc_log =
6601 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6602 if (msm_pcie_dev[i].ipc_log == NULL)
6603 pr_err("%s: unable to create IPC log context for %s\n",
6604 __func__, rc_name);
6605 else
6606 PCIE_DBG(&msm_pcie_dev[i],
6607 "PCIe IPC logging is enable for RC%d\n",
6608 i);
6609 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
6610 msm_pcie_dev[i].ipc_log_long =
6611 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6612 if (msm_pcie_dev[i].ipc_log_long == NULL)
6613 pr_err("%s: unable to create IPC log context for %s\n",
6614 __func__, rc_name);
6615 else
6616 PCIE_DBG(&msm_pcie_dev[i],
6617 "PCIe IPC logging %s is enable for RC%d\n",
6618 rc_name, i);
6619 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
6620 msm_pcie_dev[i].ipc_log_dump =
6621 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6622 if (msm_pcie_dev[i].ipc_log_dump == NULL)
6623 pr_err("%s: unable to create IPC log context for %s\n",
6624 __func__, rc_name);
6625 else
6626 PCIE_DBG(&msm_pcie_dev[i],
6627 "PCIe IPC logging %s is enable for RC%d\n",
6628 rc_name, i);
6629 spin_lock_init(&msm_pcie_dev[i].cfg_lock);
6630 msm_pcie_dev[i].cfg_access = true;
6631 mutex_init(&msm_pcie_dev[i].enumerate_lock);
6632 mutex_init(&msm_pcie_dev[i].setup_lock);
6633 mutex_init(&msm_pcie_dev[i].recovery_lock);
6634 spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
6635 spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
6636 spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
6637 spin_lock_init(&msm_pcie_dev[i].aer_lock);
6638 msm_pcie_dev[i].drv_ready = false;
6639 }
6640 for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
6641 msm_pcie_dev_tbl[i].bdf = 0;
6642 msm_pcie_dev_tbl[i].dev = NULL;
6643 msm_pcie_dev_tbl[i].short_bdf = 0;
6644 msm_pcie_dev_tbl[i].sid = 0;
6645 msm_pcie_dev_tbl[i].domain = -1;
6646 msm_pcie_dev_tbl[i].conf_base = 0;
6647 msm_pcie_dev_tbl[i].phy_address = 0;
6648 msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
6649 msm_pcie_dev_tbl[i].event_reg = NULL;
6650 msm_pcie_dev_tbl[i].registered = true;
6651 }
6652
6653 msm_pcie_debugfs_init();
6654
6655 ret = platform_driver_register(&msm_pcie_driver);
6656
6657 return ret;
6658}
6659
6660static void __exit pcie_exit(void)
6661{
Tony Truongbd9a3412017-02-27 18:30:13 -08006662 int i;
6663
Tony Truong349ee492014-10-01 17:35:56 -07006664 PCIE_GEN_DBG("pcie:%s.\n", __func__);
6665
6666 platform_driver_unregister(&msm_pcie_driver);
6667
6668 msm_pcie_debugfs_exit();
Tony Truongbd9a3412017-02-27 18:30:13 -08006669
6670 for (i = 0; i < MAX_RC_NUM; i++)
6671 msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
Tony Truong349ee492014-10-01 17:35:56 -07006672}
6673
6674subsys_initcall_sync(pcie_init);
6675module_exit(pcie_exit);
6676
6677
6678/* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
6679static void msm_pcie_fixup_early(struct pci_dev *dev)
6680{
6681 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6682
6683 PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
6684 if (dev->hdr_type == 1)
6685 dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
6686}
6687DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6688 msm_pcie_fixup_early);
6689
6690/* Suspend the PCIe link */
/*
 * msm_pcie_pm_suspend() - take the PCIe link down and power off the RC.
 * @dev:     endpoint pci_dev whose config space may be saved
 * @user:    unused here (kept for the msm_pcie_pm_control() signature)
 * @data:    unused here
 * @options: MSM_PCIE_CONFIG_NO_CFG_RESTORE skips the config-space save
 *
 * Sequence: mark suspending for the AER handler, save EP config space,
 * block further config access, send PME_Turn_Off, wait for L23_Ready,
 * then cut clocks/regulators and switch pinctrl to the sleep state.
 *
 * Return: 0 on success or the pci_save_state() error code.
 */
static int msm_pcie_pm_suspend(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret = 0;
	u32 val = 0;
	int ret_l23;
	unsigned long irqsave_flags;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* Tell the AER path a suspend is in flight before touching HW */
	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
	pcie_dev->suspending = true;
	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);

	if (!pcie_dev->power_on) {
		PCIE_DBG(pcie_dev,
			"PCIe: power of RC%d has been turned off.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	/*
	 * Save EP config space only when the caller wants it restored at
	 * resume and the link is confirmed up.
	 */
	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
			pcie_dev->conf)) {
		ret = pci_save_state(dev);
		pcie_dev->saved_state = pci_store_saved_state(dev);
	}
	if (ret) {
		PCIE_ERR(pcie_dev, "PCIe: fail to save state of RC%d:%d.\n",
			pcie_dev->rc_idx, ret);
		pcie_dev->suspending = false;
		return ret;
	}

	/* Block config-space access while the link goes down */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = false;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* Trigger PME_Turn_Off via ELBI_SYS_CTRL bit 4 */
	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
				BIT(4));

	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
		pcie_dev->rc_idx);

	/* Poll (10us interval, 100ms timeout) for L23_Ready in PM_STTS */
	ret_l23 = readl_poll_timeout((pcie_dev->parf
		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);

	/* check L23_Ready */
	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
		pcie_dev->rc_idx,
		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
	if (!ret_l23)
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
			pcie_dev->rc_idx);

	/* Power down regardless of L23 status; the wait is best-effort */
	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_sleep);

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6761}
6762
6763static void msm_pcie_fixup_suspend(struct pci_dev *dev)
6764{
6765 int ret;
6766 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6767
6768 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6769
6770 if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
6771 return;
6772
6773 spin_lock_irqsave(&pcie_dev->cfg_lock,
6774 pcie_dev->irqsave_flags);
6775 if (pcie_dev->disable_pc) {
6776 PCIE_DBG(pcie_dev,
6777 "RC%d: Skip suspend because of user request\n",
6778 pcie_dev->rc_idx);
6779 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
6780 pcie_dev->irqsave_flags);
6781 return;
6782 }
6783 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
6784 pcie_dev->irqsave_flags);
6785
6786 mutex_lock(&pcie_dev->recovery_lock);
6787
6788 ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
6789 if (ret)
6790 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
6791 pcie_dev->rc_idx, ret);
6792
6793 mutex_unlock(&pcie_dev->recovery_lock);
6794}
6795DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6796 msm_pcie_fixup_suspend);
6797
6798/* Resume the PCIe link */
/*
 * msm_pcie_pm_resume() - power the RC back up and retrain the link.
 * @dev:     endpoint pci_dev whose config space may be restored
 * @user:    unused here (kept for the msm_pcie_pm_control() signature)
 * @data:    unused here
 * @options: MSM_PCIE_CONFIG_NO_CFG_RESTORE skips the config-space restore
 *
 * Sequence: restore the default pinmux, re-allow config access, bring up
 * clocks/regulators and the link, restore the state saved at suspend,
 * and (when a bridge was found at enumeration) re-program config space
 * from the shadow via msm_pcie_recover_config().
 *
 * Return: 0 on success or the msm_pcie_enable() error code.
 */
static int msm_pcie_pm_resume(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* Restore the active pinmux before powering the link back up */
	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_default);

	/* Re-allow config-space access */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = true;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
	if (ret) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d fail to enable PCIe link in resume.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	pcie_dev->suspending = false;
	PCIE_DBG(pcie_dev,
		"dev->bus->number = %d dev->bus->primary = %d\n",
		dev->bus->number, dev->bus->primary);

	/* Restore the config space saved at suspend unless opted out */
	if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCI framework restore state\n",
			pcie_dev->rc_idx);

		pci_load_and_free_saved_state(dev,
					&pcie_dev->saved_state);
		pci_restore_state(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCI framework restore state\n",
			pcie_dev->rc_idx);
	}

	if (pcie_dev->bridge_found) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCIe recover config\n",
			pcie_dev->rc_idx);

		msm_pcie_recover_config(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCIe recover config\n",
			pcie_dev->rc_idx);
	}

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6860
6861void msm_pcie_fixup_resume(struct pci_dev *dev)
6862{
6863 int ret;
6864 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6865
6866 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6867
6868 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6869 pcie_dev->user_suspend)
6870 return;
6871
6872 mutex_lock(&pcie_dev->recovery_lock);
6873 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6874 if (ret)
6875 PCIE_ERR(pcie_dev,
6876 "PCIe: RC%d got failure in fixup resume:%d.\n",
6877 pcie_dev->rc_idx, ret);
6878
6879 mutex_unlock(&pcie_dev->recovery_lock);
6880}
6881DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6882 msm_pcie_fixup_resume);
6883
6884void msm_pcie_fixup_resume_early(struct pci_dev *dev)
6885{
6886 int ret;
6887 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6888
6889 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6890
6891 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6892 pcie_dev->user_suspend)
6893 return;
6894
6895 mutex_lock(&pcie_dev->recovery_lock);
6896 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6897 if (ret)
6898 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
6899 pcie_dev->rc_idx, ret);
6900
6901 mutex_unlock(&pcie_dev->recovery_lock);
6902}
6903DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6904 msm_pcie_fixup_resume_early);
6905
/*
 * msm_pcie_pm_control() - client-facing power-management entry point.
 * @pm_opt:  requested operation (suspend/resume/disable-PC/enable-PC)
 * @busnr:   expected bus number of the EP; 0 skips the table cross-check
 * @user:    the client's struct pci_dev (used to resolve the owning RC)
 * @data:    opaque client data, forwarded to the suspend/resume helpers
 * @options: forwarded flags, e.g. MSM_PCIE_CONFIG_NO_CFG_RESTORE
 *
 * Return: 0 on success, -ENODEV for bad arguments or an unsupported
 * operation, -EPROBE_DEFER if the RC is not probed yet, or
 * MSM_PCIE_ERROR on a device-table mismatch or rejected request.
 */
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
			void *data, u32 options)
{
	int i, ret = 0;
	struct pci_dev *dev;
	u32 rc_idx = 0;
	struct msm_pcie_dev_t *pcie_dev;

	PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
		pm_opt, busnr, options);


	if (!user) {
		pr_err("PCIe: endpoint device is NULL\n");
		ret = -ENODEV;
		goto out;
	}

	/* Resolve the owning root complex from the EP's bus private data */
	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);

	if (pcie_dev) {
		rc_idx = pcie_dev->rc_idx;
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
			rc_idx, pm_opt, busnr, options);
	} else {
		pr_err(
			"PCIe: did not find RC for pci endpoint device.\n"
			);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * When a bus number is supplied, cross-check it against the entry
	 * for this EP in the per-RC device table (bus number lives in
	 * bits 31:24 of the stored BDF).
	 */
	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (!busnr)
			break;
		if (user == pcie_dev->pcidev_table[i].dev) {
			if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
				break;

			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: bus number %d does not match with the expected value %d\n",
				pcie_dev->rc_idx, busnr,
				pcie_dev->pcidev_table[i].bdf >> 24);
			ret = MSM_PCIE_ERROR;
			goto out;
		}
	}

	if (i == MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: endpoint device was not found in device table",
			pcie_dev->rc_idx);
		ret = MSM_PCIE_ERROR;
		goto out;
	}

	dev = msm_pcie_dev[rc_idx].dev;

	if (!msm_pcie_dev[rc_idx].drv_ready) {
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"RC%d has not been successfully probed yet\n",
			rc_idx);
		return -EPROBE_DEFER;
	}

	switch (pm_opt) {
	case MSM_PCIE_SUSPEND:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to suspend the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);

		if (!msm_pcie_dev[rc_idx].power_on) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);
			break;
		}

		if (msm_pcie_dev[rc_idx].pending_ep_reg) {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: request to suspend the link is rejected\n",
				rc_idx);
			break;
		}

		/* Keep the link up while any other EP is still active */
		if (pcie_dev->num_active_ep) {
			PCIE_DBG(pcie_dev,
				"RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
				pcie_dev->rc_idx, pcie_dev->num_active_ep);
			return ret;
		}

		msm_pcie_dev[rc_idx].user_suspend = true;

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);

		ret = msm_pcie_pm_suspend(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to suspend the link.\n",
				rc_idx);
			/* Roll back so fixup-driven suspend still works */
			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
		break;
	case MSM_PCIE_RESUME:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to resume the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status !=
					MSM_PCIE_LINK_DISABLED) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status,
				msm_pcie_dev[rc_idx].num_active_ep);
			break;
		}

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
		ret = msm_pcie_pm_resume(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to resume the link.\n",
				rc_idx);
		} else {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user succeeded to resume the link.\n",
				rc_idx);

			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);

		break;
	case MSM_PCIE_DISABLE_PC:
		/* Pin the link up; rejected if a suspend already started */
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to keep the link always alive.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		if (msm_pcie_dev[rc_idx].suspending) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d Link has been suspended before request\n",
				rc_idx);
			ret = MSM_PCIE_ERROR;
		} else {
			msm_pcie_dev[rc_idx].disable_pc = true;
		}
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	case MSM_PCIE_ENABLE_PC:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d cancels the request of alive link.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		msm_pcie_dev[rc_idx].disable_pc = false;
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	default:
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"PCIe: RC%d: unsupported pm operation:%d.\n",
			rc_idx, pm_opt);
		ret = -ENODEV;
		goto out;
	}

out:
	return ret;
}
7083EXPORT_SYMBOL(msm_pcie_pm_control);
7084
7085int msm_pcie_register_event(struct msm_pcie_register_event *reg)
7086{
7087 int i, ret = 0;
7088 struct msm_pcie_dev_t *pcie_dev;
7089
7090 if (!reg) {
7091 pr_err("PCIe: Event registration is NULL\n");
7092 return -ENODEV;
7093 }
7094
7095 if (!reg->user) {
7096 pr_err("PCIe: User of event registration is NULL\n");
7097 return -ENODEV;
7098 }
7099
7100 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
7101
7102 if (!pcie_dev) {
7103 PCIE_ERR(pcie_dev, "%s",
7104 "PCIe: did not find RC for pci endpoint device.\n");
7105 return -ENODEV;
7106 }
7107
7108 if (pcie_dev->num_ep > 1) {
7109 for (i = 0; i < MAX_DEVICE_NUM; i++) {
7110 if (reg->user ==
7111 pcie_dev->pcidev_table[i].dev) {
7112 pcie_dev->event_reg =
7113 pcie_dev->pcidev_table[i].event_reg;
7114
7115 if (!pcie_dev->event_reg) {
7116 pcie_dev->pcidev_table[i].registered =
7117 true;
7118
7119 pcie_dev->num_active_ep++;
7120 PCIE_DBG(pcie_dev,
7121 "PCIe: RC%d: number of active EP(s): %d.\n",
7122 pcie_dev->rc_idx,
7123 pcie_dev->num_active_ep);
7124 }
7125
7126 pcie_dev->event_reg = reg;
7127 pcie_dev->pcidev_table[i].event_reg = reg;
7128 PCIE_DBG(pcie_dev,
7129 "Event 0x%x is registered for RC %d\n",
7130 reg->events,
7131 pcie_dev->rc_idx);
7132
7133 break;
7134 }
7135 }
7136
7137 if (pcie_dev->pending_ep_reg) {
7138 for (i = 0; i < MAX_DEVICE_NUM; i++)
7139 if (!pcie_dev->pcidev_table[i].registered)
7140 break;
7141
7142 if (i == MAX_DEVICE_NUM)
7143 pcie_dev->pending_ep_reg = false;
7144 }
7145 } else {
7146 pcie_dev->event_reg = reg;
7147 PCIE_DBG(pcie_dev,
7148 "Event 0x%x is registered for RC %d\n", reg->events,
7149 pcie_dev->rc_idx);
7150 }
7151
7152 return ret;
7153}
7154EXPORT_SYMBOL(msm_pcie_register_event);
7155
7156int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
7157{
7158 int i, ret = 0;
7159 struct msm_pcie_dev_t *pcie_dev;
7160
7161 if (!reg) {
7162 pr_err("PCIe: Event deregistration is NULL\n");
7163 return -ENODEV;
7164 }
7165
7166 if (!reg->user) {
7167 pr_err("PCIe: User of event deregistration is NULL\n");
7168 return -ENODEV;
7169 }
7170
7171 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
7172
7173 if (!pcie_dev) {
7174 PCIE_ERR(pcie_dev, "%s",
7175 "PCIe: did not find RC for pci endpoint device.\n");
7176 return -ENODEV;
7177 }
7178
7179 if (pcie_dev->num_ep > 1) {
7180 for (i = 0; i < MAX_DEVICE_NUM; i++) {
7181 if (reg->user == pcie_dev->pcidev_table[i].dev) {
7182 if (pcie_dev->pcidev_table[i].event_reg) {
7183 pcie_dev->num_active_ep--;
7184 PCIE_DBG(pcie_dev,
7185 "PCIe: RC%d: number of active EP(s) left: %d.\n",
7186 pcie_dev->rc_idx,
7187 pcie_dev->num_active_ep);
7188 }
7189
7190 pcie_dev->event_reg = NULL;
7191 pcie_dev->pcidev_table[i].event_reg = NULL;
7192 PCIE_DBG(pcie_dev,
7193 "Event is deregistered for RC %d\n",
7194 pcie_dev->rc_idx);
7195
7196 break;
7197 }
7198 }
7199 } else {
7200 pcie_dev->event_reg = NULL;
7201 PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
7202 pcie_dev->rc_idx);
7203 }
7204
7205 return ret;
7206}
7207EXPORT_SYMBOL(msm_pcie_deregister_event);
7208
7209int msm_pcie_recover_config(struct pci_dev *dev)
7210{
7211 int ret = 0;
7212 struct msm_pcie_dev_t *pcie_dev;
7213
7214 if (dev) {
7215 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
7216 PCIE_DBG(pcie_dev,
7217 "Recovery for the link of RC%d\n", pcie_dev->rc_idx);
7218 } else {
7219 pr_err("PCIe: the input pci dev is NULL.\n");
7220 return -ENODEV;
7221 }
7222
7223 if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
7224 PCIE_DBG(pcie_dev,
7225 "Recover config space of RC%d and its EP\n",
7226 pcie_dev->rc_idx);
7227 pcie_dev->shadow_en = false;
7228 PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
7229 msm_pcie_cfg_recover(pcie_dev, true);
7230 PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
7231 msm_pcie_cfg_recover(pcie_dev, false);
7232 PCIE_DBG(pcie_dev,
7233 "Refreshing the saved config space in PCI framework for RC%d and its EP\n",
7234 pcie_dev->rc_idx);
7235 pci_save_state(pcie_dev->dev);
7236 pci_save_state(dev);
7237 pcie_dev->shadow_en = true;
7238 PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
7239 pcie_dev->rc_idx);
7240 } else {
7241 PCIE_ERR(pcie_dev,
7242 "PCIe: the link of RC%d is not up yet; can't recover config space.\n",
7243 pcie_dev->rc_idx);
7244 ret = -ENODEV;
7245 }
7246
7247 return ret;
7248}
7249EXPORT_SYMBOL(msm_pcie_recover_config);
7250
7251int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
7252{
7253 int ret = 0;
7254 struct msm_pcie_dev_t *pcie_dev;
7255
7256 if (dev) {
7257 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
7258 PCIE_DBG(pcie_dev,
7259 "User requests to %s shadow\n",
7260 enable ? "enable" : "disable");
7261 } else {
7262 pr_err("PCIe: the input pci dev is NULL.\n");
7263 return -ENODEV;
7264 }
7265
7266 PCIE_DBG(pcie_dev,
7267 "The shadowing of RC%d is %s enabled currently.\n",
7268 pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not");
7269
7270 pcie_dev->shadow_en = enable;
7271
7272 PCIE_DBG(pcie_dev,
7273 "Shadowing of RC%d is turned %s upon user's request.\n",
7274 pcie_dev->rc_idx, enable ? "on" : "off");
7275
7276 return ret;
7277}
7278EXPORT_SYMBOL(msm_pcie_shadow_control);