blob: 87854cf1c686ebf113fe3f270d466280625b4b6a [file] [log] [blame]
Tony Truong349ee492014-10-01 17:35:56 -07001/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * MSM PCIe controller driver.
15 */
16
17#include <linux/module.h>
18#include <linux/bitops.h>
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
22#include <linux/gpio.h>
23#include <linux/iopoll.h>
24#include <linux/kernel.h>
25#include <linux/of_pci.h>
26#include <linux/pci.h>
27#include <linux/platform_device.h>
28#include <linux/regulator/consumer.h>
29#include <linux/regulator/rpm-smd-regulator.h>
30#include <linux/slab.h>
31#include <linux/types.h>
32#include <linux/of_gpio.h>
33#include <linux/clk/msm-clk.h>
34#include <linux/reset.h>
35#include <linux/msm-bus.h>
36#include <linux/msm-bus-board.h>
37#include <linux/debugfs.h>
38#include <linux/uaccess.h>
39#include <linux/io.h>
40#include <linux/msi.h>
41#include <linux/interrupt.h>
42#include <linux/irq.h>
43#include <linux/irqdomain.h>
44#include <linux/pm_wakeup.h>
45#include <linux/compiler.h>
46#include <soc/qcom/scm.h>
47#include <linux/ipc_logging.h>
48#include <linux/msm_pcie.h>
49
50#ifdef CONFIG_ARCH_MDMCALIFORNIUM
51#define PCIE_VENDOR_ID_RCP 0x17cb
52#define PCIE_DEVICE_ID_RCP 0x0302
53
54#define PCIE20_L1SUB_CONTROL1 0x158
55#define PCIE20_PARF_DBI_BASE_ADDR 0x350
56#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
57
58#define TX_BASE 0x200
59#define RX_BASE 0x400
60#define PCS_BASE 0x800
61#define PCS_MISC_BASE 0x600
62
63#elif defined(CONFIG_ARCH_MSM8998)
64#define PCIE_VENDOR_ID_RCP 0x17cb
65#define PCIE_DEVICE_ID_RCP 0x0105
66
67#define PCIE20_L1SUB_CONTROL1 0x1E4
68#define PCIE20_PARF_DBI_BASE_ADDR 0x350
69#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
70
71#define TX_BASE 0
72#define RX_BASE 0
73#define PCS_BASE 0x800
74#define PCS_MISC_BASE 0
75
76#else
77#define PCIE_VENDOR_ID_RCP 0x17cb
78#define PCIE_DEVICE_ID_RCP 0x0104
79
80#define PCIE20_L1SUB_CONTROL1 0x158
81#define PCIE20_PARF_DBI_BASE_ADDR 0x168
82#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
83
84#define TX_BASE 0x1000
85#define RX_BASE 0x1200
86#define PCS_BASE 0x1400
87#define PCS_MISC_BASE 0
88#endif
89
/*
 * Per-lane register block base addresses: lane index n scaled by the
 * port count m, each window 0x1000 bytes wide. Arguments are fully
 * parenthesized so expressions can be passed without precedence bugs.
 */
#define TX(n, m) (TX_BASE + (n) * (m) * 0x1000)
#define RX(n, m) (RX_BASE + (n) * (m) * 0x1000)
#define PCS_PORT(n, m) (PCS_BASE + (n) * (m) * 0x1000)
#define PCS_MISC_PORT(n, m) (PCS_MISC_BASE + (n) * (m) * 0x1000)
94
95#define QSERDES_COM_BG_TIMER 0x00C
96#define QSERDES_COM_SSC_EN_CENTER 0x010
97#define QSERDES_COM_SSC_ADJ_PER1 0x014
98#define QSERDES_COM_SSC_ADJ_PER2 0x018
99#define QSERDES_COM_SSC_PER1 0x01C
100#define QSERDES_COM_SSC_PER2 0x020
101#define QSERDES_COM_SSC_STEP_SIZE1 0x024
102#define QSERDES_COM_SSC_STEP_SIZE2 0x028
103#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
104#define QSERDES_COM_CLK_ENABLE1 0x038
105#define QSERDES_COM_SYS_CLK_CTRL 0x03C
106#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
107#define QSERDES_COM_PLL_IVCO 0x048
108#define QSERDES_COM_LOCK_CMP1_MODE0 0x04C
109#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
110#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
111#define QSERDES_COM_BG_TRIM 0x070
112#define QSERDES_COM_CLK_EP_DIV 0x074
113#define QSERDES_COM_CP_CTRL_MODE0 0x078
114#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
115#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
116#define QSERDES_COM_SYSCLK_EN_SEL 0x0AC
117#define QSERDES_COM_RESETSM_CNTRL 0x0B4
118#define QSERDES_COM_RESTRIM_CTRL 0x0BC
119#define QSERDES_COM_RESCODE_DIV_NUM 0x0C4
120#define QSERDES_COM_LOCK_CMP_EN 0x0C8
121#define QSERDES_COM_DEC_START_MODE0 0x0D0
122#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0DC
123#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0E0
124#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0E4
125#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
126#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10C
127#define QSERDES_COM_VCO_TUNE_CTRL 0x124
128#define QSERDES_COM_VCO_TUNE_MAP 0x128
129#define QSERDES_COM_VCO_TUNE1_MODE0 0x12C
130#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
131#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
132#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
133#define QSERDES_COM_BG_CTRL 0x170
134#define QSERDES_COM_CLK_SELECT 0x174
135#define QSERDES_COM_HSCLK_SEL 0x178
136#define QSERDES_COM_CORECLK_DIV 0x184
137#define QSERDES_COM_CORE_CLK_EN 0x18C
138#define QSERDES_COM_C_READY_STATUS 0x190
139#define QSERDES_COM_CMN_CONFIG 0x194
140#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19C
141#define QSERDES_COM_DEBUG_BUS0 0x1A0
142#define QSERDES_COM_DEBUG_BUS1 0x1A4
143#define QSERDES_COM_DEBUG_BUS2 0x1A8
144#define QSERDES_COM_DEBUG_BUS3 0x1AC
145#define QSERDES_COM_DEBUG_BUS_SEL 0x1B0
146
147#define QSERDES_TX_N_RES_CODE_LANE_OFFSET(n, m) (TX(n, m) + 0x4C)
148#define QSERDES_TX_N_DEBUG_BUS_SEL(n, m) (TX(n, m) + 0x64)
149#define QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(n, m) (TX(n, m) + 0x68)
150#define QSERDES_TX_N_LANE_MODE(n, m) (TX(n, m) + 0x94)
151#define QSERDES_TX_N_RCV_DETECT_LVL_2(n, m) (TX(n, m) + 0xAC)
152
153#define QSERDES_RX_N_UCDR_SO_GAIN_HALF(n, m) (RX(n, m) + 0x010)
154#define QSERDES_RX_N_UCDR_SO_GAIN(n, m) (RX(n, m) + 0x01C)
155#define QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(n, m) (RX(n, m) + 0x048)
156#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(n, m) (RX(n, m) + 0x0D8)
157#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(n, m) (RX(n, m) + 0x0DC)
158#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(n, m) (RX(n, m) + 0x0E0)
159#define QSERDES_RX_N_SIGDET_ENABLES(n, m) (RX(n, m) + 0x110)
160#define QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(n, m) (RX(n, m) + 0x11C)
161#define QSERDES_RX_N_SIGDET_LVL(n, m) (RX(n, m) + 0x118)
162#define QSERDES_RX_N_RX_BAND(n, m) (RX(n, m) + 0x120)
163
164#define PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x00)
165#define PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x04)
166#define PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x08)
167#define PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x0C)
168#define PCIE_MISC_N_DEBUG_BUS_0_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x14)
169#define PCIE_MISC_N_DEBUG_BUS_1_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x18)
170#define PCIE_MISC_N_DEBUG_BUS_2_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x1C)
171#define PCIE_MISC_N_DEBUG_BUS_3_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x20)
172
173#define PCIE_N_SW_RESET(n, m) (PCS_PORT(n, m) + 0x00)
174#define PCIE_N_POWER_DOWN_CONTROL(n, m) (PCS_PORT(n, m) + 0x04)
175#define PCIE_N_START_CONTROL(n, m) (PCS_PORT(n, m) + 0x08)
176#define PCIE_N_TXDEEMPH_M6DB_V0(n, m) (PCS_PORT(n, m) + 0x24)
177#define PCIE_N_TXDEEMPH_M3P5DB_V0(n, m) (PCS_PORT(n, m) + 0x28)
178#define PCIE_N_ENDPOINT_REFCLK_DRIVE(n, m) (PCS_PORT(n, m) + 0x54)
179#define PCIE_N_RX_IDLE_DTCT_CNTRL(n, m) (PCS_PORT(n, m) + 0x58)
180#define PCIE_N_POWER_STATE_CONFIG1(n, m) (PCS_PORT(n, m) + 0x60)
181#define PCIE_N_POWER_STATE_CONFIG4(n, m) (PCS_PORT(n, m) + 0x6C)
182#define PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA0)
183#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA4)
184#define PCIE_N_PLL_LOCK_CHK_DLY_TIME(n, m) (PCS_PORT(n, m) + 0xA8)
185#define PCIE_N_TEST_CONTROL4(n, m) (PCS_PORT(n, m) + 0x11C)
186#define PCIE_N_TEST_CONTROL5(n, m) (PCS_PORT(n, m) + 0x120)
187#define PCIE_N_TEST_CONTROL6(n, m) (PCS_PORT(n, m) + 0x124)
188#define PCIE_N_TEST_CONTROL7(n, m) (PCS_PORT(n, m) + 0x128)
189#define PCIE_N_PCS_STATUS(n, m) (PCS_PORT(n, m) + 0x174)
190#define PCIE_N_DEBUG_BUS_0_STATUS(n, m) (PCS_PORT(n, m) + 0x198)
191#define PCIE_N_DEBUG_BUS_1_STATUS(n, m) (PCS_PORT(n, m) + 0x19C)
192#define PCIE_N_DEBUG_BUS_2_STATUS(n, m) (PCS_PORT(n, m) + 0x1A0)
193#define PCIE_N_DEBUG_BUS_3_STATUS(n, m) (PCS_PORT(n, m) + 0x1A4)
194#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1A8)
195#define PCIE_N_OSC_DTCT_ACTIONS(n, m) (PCS_PORT(n, m) + 0x1AC)
196#define PCIE_N_SIGDET_CNTRL(n, m) (PCS_PORT(n, m) + 0x1B0)
197#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(n, m) (PCS_PORT(n, m) + 0x1DC)
198#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1E0)
199
200#define PCIE_COM_SW_RESET 0x400
201#define PCIE_COM_POWER_DOWN_CONTROL 0x404
202#define PCIE_COM_START_CONTROL 0x408
203#define PCIE_COM_DEBUG_BUS_BYTE0_INDEX 0x438
204#define PCIE_COM_DEBUG_BUS_BYTE1_INDEX 0x43C
205#define PCIE_COM_DEBUG_BUS_BYTE2_INDEX 0x440
206#define PCIE_COM_DEBUG_BUS_BYTE3_INDEX 0x444
207#define PCIE_COM_PCS_READY_STATUS 0x448
208#define PCIE_COM_DEBUG_BUS_0_STATUS 0x45C
209#define PCIE_COM_DEBUG_BUS_1_STATUS 0x460
210#define PCIE_COM_DEBUG_BUS_2_STATUS 0x464
211#define PCIE_COM_DEBUG_BUS_3_STATUS 0x468
212
213#define PCIE20_PARF_SYS_CTRL 0x00
214#define PCIE20_PARF_PM_STTS 0x24
215#define PCIE20_PARF_PCS_DEEMPH 0x34
216#define PCIE20_PARF_PCS_SWING 0x38
217#define PCIE20_PARF_PHY_CTRL 0x40
218#define PCIE20_PARF_PHY_REFCLK 0x4C
219#define PCIE20_PARF_CONFIG_BITS 0x50
220#define PCIE20_PARF_TEST_BUS 0xE4
221#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
222#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8
223#define PCIE20_PARF_LTSSM 0x1B0
224#define PCIE20_PARF_INT_ALL_STATUS 0x224
225#define PCIE20_PARF_INT_ALL_CLEAR 0x228
226#define PCIE20_PARF_INT_ALL_MASK 0x22C
227#define PCIE20_PARF_SID_OFFSET 0x234
228#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
229#define PCIE20_PARF_BDF_TRANSLATE_N 0x250
230
231#define PCIE20_ELBI_VERSION 0x00
232#define PCIE20_ELBI_SYS_CTRL 0x04
233#define PCIE20_ELBI_SYS_STTS 0x08
234
235#define PCIE20_CAP 0x70
236#define PCIE20_CAP_DEVCTRLSTATUS (PCIE20_CAP + 0x08)
237#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
238
239#define PCIE20_COMMAND_STATUS 0x04
240#define PCIE20_HEADER_TYPE 0x0C
241#define PCIE20_BUSNUMBERS 0x18
242#define PCIE20_MEMORY_BASE_LIMIT 0x20
243#define PCIE20_BRIDGE_CTRL 0x3C
244#define PCIE20_DEVICE_CONTROL_STATUS 0x78
245#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
246
247#define PCIE20_AUX_CLK_FREQ_REG 0xB40
248#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C
249#define PCIE20_ACK_N_FTS 0xff00
250
251#define PCIE20_PLR_IATU_VIEWPORT 0x900
252#define PCIE20_PLR_IATU_CTRL1 0x904
253#define PCIE20_PLR_IATU_CTRL2 0x908
254#define PCIE20_PLR_IATU_LBAR 0x90C
255#define PCIE20_PLR_IATU_UBAR 0x910
256#define PCIE20_PLR_IATU_LAR 0x914
257#define PCIE20_PLR_IATU_LTAR 0x918
258#define PCIE20_PLR_IATU_UTAR 0x91c
259
260#define PCIE20_CTRL1_TYPE_CFG0 0x04
261#define PCIE20_CTRL1_TYPE_CFG1 0x05
262
263#define PCIE20_CAP_ID 0x10
264#define L1SUB_CAP_ID 0x1E
265
266#define PCIE_CAP_PTR_OFFSET 0x34
267#define PCIE_EXT_CAP_OFFSET 0x100
268
269#define PCIE20_AER_UNCORR_ERR_STATUS_REG 0x104
270#define PCIE20_AER_CORR_ERR_STATUS_REG 0x110
271#define PCIE20_AER_ROOT_ERR_STATUS_REG 0x130
272#define PCIE20_AER_ERR_SRC_ID_REG 0x134
273
274#define RD 0
275#define WR 1
276#define MSM_PCIE_ERROR -1
277
278#define PERST_PROPAGATION_DELAY_US_MIN 1000
279#define PERST_PROPAGATION_DELAY_US_MAX 1005
280#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
281#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
282#define LINK_UP_TIMEOUT_US_MIN 5000
283#define LINK_UP_TIMEOUT_US_MAX 5100
284#define LINK_UP_CHECK_MAX_COUNT 20
285#define PHY_STABILIZATION_DELAY_US_MIN 995
286#define PHY_STABILIZATION_DELAY_US_MAX 1005
287#define POWER_DOWN_DELAY_US_MIN 10
288#define POWER_DOWN_DELAY_US_MAX 11
289#define LINKDOWN_INIT_WAITING_US_MIN 995
290#define LINKDOWN_INIT_WAITING_US_MAX 1005
291#define LINKDOWN_WAITING_US_MIN 4900
292#define LINKDOWN_WAITING_US_MAX 5100
293#define LINKDOWN_WAITING_COUNT 200
294
295#define PHY_READY_TIMEOUT_COUNT 10
296#define XMLH_LINK_UP 0x400
297#define MAX_LINK_RETRIES 5
298#define MAX_BUS_NUM 3
299#define MAX_PROP_SIZE 32
300#define MAX_RC_NAME_LEN 15
301#define MSM_PCIE_MAX_VREG 4
302#define MSM_PCIE_MAX_CLK 9
303#define MSM_PCIE_MAX_PIPE_CLK 1
304#define MAX_RC_NUM 3
305#define MAX_DEVICE_NUM 20
306#define MAX_SHORT_BDF_NUM 16
307#define PCIE_TLP_RD_SIZE 0x5
308#define PCIE_MSI_NR_IRQS 256
309#define MSM_PCIE_MAX_MSI 32
310#define MAX_MSG_LEN 80
311#define PCIE_LOG_PAGES (50)
312#define PCIE_CONF_SPACE_DW 1024
313#define PCIE_CLEAR 0xDEADBEEF
314#define PCIE_LINK_DOWN 0xFFFFFFFF
315
316#define MSM_PCIE_MAX_RESET 4
317#define MSM_PCIE_MAX_PIPE_RESET 1
318
319#define MSM_PCIE_MSI_PHY 0xa0000000
320#define PCIE20_MSI_CTRL_ADDR (0x820)
321#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
322#define PCIE20_MSI_CTRL_INTR_EN (0x828)
323#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
324#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
325#define PCIE20_MSI_CTRL_MAX 8
326
327/* PM control options */
328#define PM_IRQ 0x1
329#define PM_CLK 0x2
330#define PM_GPIO 0x4
331#define PM_VREG 0x8
332#define PM_PIPE_CLK 0x10
333#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)
334
335#ifdef CONFIG_PHYS_ADDR_T_64BIT
336#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
337#else
338#define PCIE_UPPER_ADDR(addr) (0x0)
339#endif
340#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
341
/*
 * Config Space Offsets: build the BDF key used for config-space access
 * bookkeeping — bus number in bits 31:24, devfn in bits 23:16.
 * Arguments are parenthesized so expressions can be passed safely.
 */
#define BDF_OFFSET(bus, devfn) \
	(((bus) << 24) | ((devfn) << 16))
345
346#define PCIE_GEN_DBG(x...) do { \
347 if (msm_pcie_debug_mask) \
348 pr_alert(x); \
349 } while (0)
350
351#define PCIE_DBG(dev, fmt, arg...) do { \
352 if ((dev) && (dev)->ipc_log_long) \
353 ipc_log_string((dev)->ipc_log_long, \
354 "DBG1:%s: " fmt, __func__, arg); \
355 if ((dev) && (dev)->ipc_log) \
356 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
357 if (msm_pcie_debug_mask) \
358 pr_alert("%s: " fmt, __func__, arg); \
359 } while (0)
360
361#define PCIE_DBG2(dev, fmt, arg...) do { \
362 if ((dev) && (dev)->ipc_log) \
363 ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
364 if (msm_pcie_debug_mask) \
365 pr_alert("%s: " fmt, __func__, arg); \
366 } while (0)
367
368#define PCIE_DBG3(dev, fmt, arg...) do { \
369 if ((dev) && (dev)->ipc_log) \
370 ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
371 if (msm_pcie_debug_mask) \
372 pr_alert("%s: " fmt, __func__, arg); \
373 } while (0)
374
375#define PCIE_DUMP(dev, fmt, arg...) do { \
376 if ((dev) && (dev)->ipc_log_dump) \
377 ipc_log_string((dev)->ipc_log_dump, \
378 "DUMP:%s: " fmt, __func__, arg); \
379 } while (0)
380
381#define PCIE_DBG_FS(dev, fmt, arg...) do { \
382 if ((dev) && (dev)->ipc_log_dump) \
383 ipc_log_string((dev)->ipc_log_dump, \
384 "DBG_FS:%s: " fmt, __func__, arg); \
385 pr_alert("%s: " fmt, __func__, arg); \
386 } while (0)
387
388#define PCIE_INFO(dev, fmt, arg...) do { \
389 if ((dev) && (dev)->ipc_log_long) \
390 ipc_log_string((dev)->ipc_log_long, \
391 "INFO:%s: " fmt, __func__, arg); \
392 if ((dev) && (dev)->ipc_log) \
393 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
394 pr_info("%s: " fmt, __func__, arg); \
395 } while (0)
396
397#define PCIE_ERR(dev, fmt, arg...) do { \
398 if ((dev) && (dev)->ipc_log_long) \
399 ipc_log_string((dev)->ipc_log_long, \
400 "ERR:%s: " fmt, __func__, arg); \
401 if ((dev) && (dev)->ipc_log) \
402 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
403 pr_err("%s: " fmt, __func__, arg); \
404 } while (0)
405
406
/* Memory-mapped register regions; indexes the msm_pcie_res_info[] table. */
enum msm_pcie_res {
	MSM_PCIE_RES_PARF,	/* "parf" wrapper registers */
	MSM_PCIE_RES_PHY,	/* "phy" register space */
	MSM_PCIE_RES_DM_CORE,	/* "dm_core" DesignWare core registers */
	MSM_PCIE_RES_ELBI,	/* "elbi" external local bus interface */
	MSM_PCIE_RES_CONF,	/* "conf" config-space access window */
	MSM_PCIE_RES_IO,	/* "io" window */
	MSM_PCIE_RES_BARS,	/* "bars" memory window */
	MSM_PCIE_RES_TCSR,	/* "tcsr" top-level CSR */
	MSM_PCIE_MAX_RES,	/* entry count — keep last */
};
418
/* Platform interrupt lines; indexes the msm_pcie_irq_info[] table. */
enum msm_pcie_irq {
	MSM_PCIE_INT_MSI,		/* MSI interrupt */
	MSM_PCIE_INT_A,			/* legacy INTA */
	MSM_PCIE_INT_B,			/* legacy INTB */
	MSM_PCIE_INT_C,			/* legacy INTC */
	MSM_PCIE_INT_D,			/* legacy INTD */
	MSM_PCIE_INT_PLS_PME,
	MSM_PCIE_INT_PME_LEGACY,
	MSM_PCIE_INT_PLS_ERR,
	MSM_PCIE_INT_AER_LEGACY,
	MSM_PCIE_INT_LINK_UP,
	MSM_PCIE_INT_LINK_DOWN,
	MSM_PCIE_INT_BRIDGE_FLUSH_N,
	MSM_PCIE_INT_GLOBAL_INT,	/* aggregated PARF INT_ALL interrupt */
	MSM_PCIE_MAX_IRQ,		/* entry count — keep last */
};
435
/*
 * Events reported through the PARF INT_ALL status/clear/mask registers.
 * NOTE(review): values appear to mirror hardware event numbering (starts
 * at 1, MAX pinned at 30) — do not reorder; confirm against the PARF spec.
 */
enum msm_pcie_irq_event {
	MSM_PCIE_INT_EVT_LINK_DOWN = 1,
	MSM_PCIE_INT_EVT_BME,
	MSM_PCIE_INT_EVT_PM_TURNOFF,
	MSM_PCIE_INT_EVT_DEBUG,
	MSM_PCIE_INT_EVT_LTR,
	MSM_PCIE_INT_EVT_MHI_Q6,
	MSM_PCIE_INT_EVT_MHI_A7,
	MSM_PCIE_INT_EVT_DSTATE_CHANGE,
	MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
	MSM_PCIE_INT_EVT_MMIO_WRITE,
	MSM_PCIE_INT_EVT_CFG_WRITE,
	MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
	MSM_PCIE_INT_EVT_LINK_UP,
	MSM_PCIE_INT_EVT_AER_LEGACY,
	MSM_PCIE_INT_EVT_AER_ERR,
	MSM_PCIE_INT_EVT_PME_LEGACY,
	MSM_PCIE_INT_EVT_PLS_PME,
	MSM_PCIE_INT_EVT_INTD,
	MSM_PCIE_INT_EVT_INTC,
	MSM_PCIE_INT_EVT_INTB,
	MSM_PCIE_INT_EVT_INTA,
	MSM_PCIE_INT_EVT_EDMA,
	MSM_PCIE_INT_EVT_MSI_0,
	MSM_PCIE_INT_EVT_MSI_1,
	MSM_PCIE_INT_EVT_MSI_2,
	MSM_PCIE_INT_EVT_MSI_3,
	MSM_PCIE_INT_EVT_MSI_4,
	MSM_PCIE_INT_EVT_MSI_5,
	MSM_PCIE_INT_EVT_MSI_6,
	MSM_PCIE_INT_EVT_MSI_7,
	MSM_PCIE_INT_EVT_MAX = 30,
};
469
/* GPIO roles; indexes the msm_pcie_gpio_info[] table. */
enum msm_pcie_gpio {
	MSM_PCIE_GPIO_PERST,	/* "perst-gpio": PERST# to the endpoint */
	MSM_PCIE_GPIO_WAKE,	/* "wake-gpio": WAKE# from the endpoint */
	MSM_PCIE_GPIO_EP,	/* "qcom,ep-gpio": endpoint control line */
	MSM_PCIE_MAX_GPIO
};
476
/* Coarse per-RC link state tracked in msm_pcie_dev_t.link_status. */
enum msm_pcie_link_status {
	MSM_PCIE_LINK_DEINIT,	/* link never brought up */
	MSM_PCIE_LINK_ENABLED,	/* link up and usable */
	MSM_PCIE_LINK_DISABLED	/* link brought down */
};
482
/* gpio info structure */
struct msm_pcie_gpio_info_t {
	char *name;		/* DT property name */
	uint32_t num;		/* GPIO number — filled in at probe */
	bool out;		/* true if driven as an output */
	uint32_t on;		/* value to drive when enabled — assumed; confirm in probe path */
	uint32_t init;		/* initial value to drive */
	bool required;		/* presumably probe fails if absent — confirm */
};
492
/* voltage regulator info structure */
struct msm_pcie_vreg_info_t {
	struct regulator *hdl;	/* regulator handle — acquired at probe */
	char *name;		/* supply name */
	uint32_t max_v;		/* max voltage, uV (see msm_pcie_vreg_info[]) */
	uint32_t min_v;		/* min voltage, uV */
	uint32_t opt_mode;	/* load for optimum mode, uA — assumed; confirm */
	bool required;		/* presumably probe fails if absent — confirm */
};
502
/* reset info structure: one reset_control line owned by the RC */
struct msm_pcie_reset_info_t {
	struct reset_control *hdl;	/* acquired at probe */
	char *name;			/* reset line name */
	bool required;
};
509
/* clock info structure */
struct msm_pcie_clk_info_t {
	struct clk *hdl;	/* clock handle — acquired at probe */
	char *name;		/* clock name */
	u32 freq;		/* rate in Hz; 0 presumably leaves rate untouched — confirm */
	bool config_mem;	/* apply NORETAIN mem/periph flags (msm_pcie_config_clock_mem) */
	bool required;
};
518
/* resource info structure: one platform MMIO region */
struct msm_pcie_res_info_t {
	char *name;			/* platform resource name */
	struct resource *resource;
	void __iomem *base;		/* ioremapped base address */
};
525
/* irq info structure */
struct msm_pcie_irq_info_t {
	char *name;	/* interrupt name */
	uint32_t num;	/* Linux IRQ number — filled in at probe */
};
531
/* phy info structure: one step of a PHY register programming sequence */
struct msm_pcie_phy_info_t {
	u32 offset;	/* register offset within the PHY block */
	u32 val;	/* value to write */
	u32 delay;	/* post-write delay — units not visible here; confirm */
};
538
/* PCIe device info structure: per-endpoint bookkeeping (pcidev_table) */
struct msm_pcie_device_info {
	u32 bdf;			/* bus/devfn key (see BDF_OFFSET) */
	struct pci_dev *dev;
	short short_bdf;
	u32 sid;			/* likely SMMU stream ID — confirm */
	int domain;
	void __iomem *conf_base;	/* mapped config space for this EP */
	unsigned long phy_address;
	u32 dev_ctrlstts_offset;	/* Device Control/Status offset in cap list */
	struct msm_pcie_register_event *event_reg;	/* client event callbacks */
	bool registered;
};
552
/* msm pcie device structure: one instance per root complex (msm_pcie_dev[]) */
struct msm_pcie_dev_t {
	struct platform_device *pdev;
	struct pci_dev *dev;
	struct regulator *gdsc;
	struct regulator *gdsc_smmu;
	/* per-RC copies of the static config tables below */
	struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
	struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
	struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK];
	struct msm_pcie_clk_info_t pipeclk[MSM_PCIE_MAX_PIPE_CLK];
	struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
	struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
	struct msm_pcie_irq_info_t msi[MSM_PCIE_MAX_MSI];
	struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
	struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];

	/* ioremapped register bases (shortcuts into res[]) */
	void __iomem *parf;
	void __iomem *phy;
	void __iomem *elbi;
	void __iomem *dm_core;
	void __iomem *conf;
	void __iomem *bars;
	void __iomem *tcsr;

	uint32_t axi_bar_start;
	uint32_t axi_bar_end;

	struct resource *dev_mem_res;
	struct resource *dev_io_res;

	uint32_t wake_n;	/* IRQ number of the WAKE# GPIO */
	uint32_t vreg_n;
	uint32_t gpio_n;
	uint32_t parf_deemph;
	uint32_t parf_swing;

	/* config-space access gating */
	bool cfg_access;
	spinlock_t cfg_lock;
	unsigned long irqsave_flags;
	struct mutex enumerate_lock;
	struct mutex setup_lock;

	/* MSI bookkeeping */
	struct irq_domain *irq_domain;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
	uint32_t msi_gicm_addr;
	uint32_t msi_gicm_base;
	bool use_msi;

	/* link / power state */
	enum msm_pcie_link_status link_status;
	bool user_suspend;
	bool disable_pc;
	struct pci_saved_state *saved_state;

	struct wakeup_source ws;
	struct msm_bus_scale_pdata *bus_scale_table;
	uint32_t bus_client;

	/* device-tree derived feature flags and tunables */
	bool l0s_supported;
	bool l1_supported;
	bool l1ss_supported;
	bool common_clk_en;
	bool clk_power_manage_en;
	bool aux_clk_sync;
	bool aer_enable;
	bool smmu_exist;
	uint32_t smmu_sid_base;
	uint32_t n_fts;
	bool ext_ref_clk;
	bool common_phy;
	uint32_t ep_latency;
	uint32_t wr_halt_size;
	uint32_t cpl_timeout;
	uint32_t current_bdf;
	short current_short_bdf;
	uint32_t perst_delay_us_min;
	uint32_t perst_delay_us_max;
	uint32_t tlp_rd_size;
	bool linkdown_panic;
	bool ep_wakeirq;

	uint32_t rc_idx;	/* index into msm_pcie_dev[] */
	uint32_t phy_ver;
	bool drv_ready;
	bool enumerated;
	struct work_struct handle_wake_work;
	struct mutex recovery_lock;
	spinlock_t linkdown_lock;
	spinlock_t wakeup_lock;
	spinlock_t global_irq_lock;
	spinlock_t aer_lock;
	/* event counters (exposed via debugfs, presumably — confirm) */
	ulong linkdown_counter;
	ulong link_turned_on_counter;
	ulong link_turned_off_counter;
	ulong rc_corr_counter;
	ulong rc_non_fatal_counter;
	ulong rc_fatal_counter;
	ulong ep_corr_counter;
	ulong ep_non_fatal_counter;
	ulong ep_fatal_counter;
	bool suspending;
	ulong wake_counter;
	u32 num_active_ep;
	u32 num_ep;
	bool pending_ep_reg;
	/* PHY init sequences loaded from DT */
	u32 phy_len;
	u32 port_phy_len;
	struct msm_pcie_phy_info_t *phy_sequence;
	struct msm_pcie_phy_info_t *port_phy_sequence;
	/* config-space shadow for restore after link recovery */
	u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
	u32 rc_shadow[PCIE_CONF_SPACE_DW];
	bool shadow_en;
	bool bridge_found;
	struct msm_pcie_register_event *event_reg;
	unsigned int scm_dev_id;
	bool power_on;
	/* ipc_logging contexts */
	void *ipc_log;
	void *ipc_log_long;
	void *ipc_log_dump;
	bool use_19p2mhz_aux_clk;
	bool use_pinctrl;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_sleep;
	struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM];
};
678
679
680/* debug mask sys interface */
681static int msm_pcie_debug_mask;
682module_param_named(debug_mask, msm_pcie_debug_mask,
683 int, 0644);
684
685/* debugfs values */
686static u32 rc_sel;
687static u32 base_sel;
688static u32 wr_offset;
689static u32 wr_mask;
690static u32 wr_value;
691static ulong corr_counter_limit = 5;
692
693/* counter to keep track if common PHY needs to be configured */
694static u32 num_rc_on;
695
696/* global lock for PCIe common PHY */
697static struct mutex com_phy_lock;
698
699/* Table to track info of PCIe devices */
700static struct msm_pcie_device_info
701 msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];
702
/* PCIe driver state shared across all root complexes */
struct pcie_drv_sta {
	u32 rc_num;		/* presumably the number of RCs probed — confirm */
	struct mutex drv_lock;	/* protects this shared state */
} pcie_drv;
708
709/* msm pcie device data */
710static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
711
/* regulators: {handle, name, max_v(uV), min_v(uV), load(uA), required} */
/* NOTE(review): "vreg-0.9" is programmed to 1000000 uV (1.0V) — confirm intended */
static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
	{NULL, "vreg-3.3", 0, 0, 0, false},
	{NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
	{NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
	{NULL, "vreg-cx", 0, 0, 0, false}
};
719
720/* GPIOs */
721static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
722 {"perst-gpio", 0, 1, 0, 0, 1},
723 {"wake-gpio", 0, 0, 0, 0, 0},
724 {"qcom,ep-gpio", 0, 1, 1, 0, 0}
725};
726
/* resets: per-RC reset lines, {handle (filled at probe), name, required} */
static struct msm_pcie_reset_info_t
msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_0_phy_reset", false}
	},
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_1_phy_reset", false}
	},
	{
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_2_phy_reset", false}
	}
};
749
/* pipe reset: per-RC PHY pipe reset line, {handle, name, required} */
static struct msm_pcie_reset_info_t
msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
	{
		{NULL, "pcie_0_phy_pipe_reset", false}
	},
	{
		{NULL, "pcie_1_phy_pipe_reset", false}
	},
	{
		{NULL, "pcie_2_phy_pipe_reset", false}
	}
};
763
/* clocks: per-RC, {handle, name, rate in Hz (0 = leave as-is, assumed),
 * config_mem (apply NORETAIN flags), required}
 */
static struct msm_pcie_clk_info_t
	msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
	{
	{NULL, "pcie_0_ref_clk_src", 0, false, false},
	{NULL, "pcie_0_aux_clk", 1010000, false, true},
	{NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_0_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_0_slv_axi_clk", 0, true, true},
	{NULL, "pcie_0_ldo", 0, false, true},
	{NULL, "pcie_0_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	},
	{
	{NULL, "pcie_1_ref_clk_src", 0, false, false},
	{NULL, "pcie_1_aux_clk", 1010000, false, true},
	{NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_1_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_1_slv_axi_clk", 0, true, true},
	{NULL, "pcie_1_ldo", 0, false, true},
	{NULL, "pcie_1_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	},
	{
	{NULL, "pcie_2_ref_clk_src", 0, false, false},
	{NULL, "pcie_2_aux_clk", 1010000, false, true},
	{NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_2_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_2_slv_axi_clk", 0, true, true},
	{NULL, "pcie_2_ldo", 0, false, true},
	{NULL, "pcie_2_smmu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	}
};
801
/* Pipe Clocks: per-RC, same field layout as msm_pcie_clk_info[] */
static struct msm_pcie_clk_info_t
	msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
	{
	{NULL, "pcie_0_pipe_clk", 125000000, true, true},
	},
	{
	{NULL, "pcie_1_pipe_clk", 125000000, true, true},
	},
	{
	{NULL, "pcie_2_pipe_clk", 125000000, true, true},
	}
};
815
/* resources: names of the MMIO regions, ordered by enum msm_pcie_res */
static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
	{"parf", 0, 0},
	{"phy", 0, 0},
	{"dm_core", 0, 0},
	{"elbi", 0, 0},
	{"conf", 0, 0},
	{"io", 0, 0},
	{"bars", 0, 0},
	{"tcsr", 0, 0}
};
827
/* irqs: interrupt names, ordered by enum msm_pcie_irq; numbers set at probe */
static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
	{"int_msi", 0},
	{"int_a", 0},
	{"int_b", 0},
	{"int_c", 0},
	{"int_d", 0},
	{"int_pls_pme", 0},
	{"int_pme_legacy", 0},
	{"int_pls_err", 0},
	{"int_aer_legacy", 0},
	{"int_pls_link_up", 0},
	{"int_pls_link_down", 0},
	{"int_bridge_flush_n", 0},
	{"int_global_int", 0}
};
844
/* MSIs: per-vector interrupt names; numbers resolved at probe */
static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
	{"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
	{"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
	{"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
	{"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
	{"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
	{"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
	{"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
	{"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
};
856
857#ifdef CONFIG_ARM
858#define PCIE_BUS_PRIV_DATA(bus) \
859 (((struct pci_sys_data *)bus->sysdata)->private_data)
860
861static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];
862
863static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
864{
865 msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
866 msm_pcie_sys_data[dev->rc_idx].private_data = dev;
867
868 return &msm_pcie_sys_data[dev->rc_idx];
869}
870
/*
 * Map legacy INTx interrupts for devices on the bus using the standard
 * swizzle and the device tree's interrupt map (ARM builds only).
 */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
}
875#else
876#define PCIE_BUS_PRIV_DATA(bus) \
877 (struct msm_pcie_dev_t *)(bus->sysdata)
878
/* Non-ARM builds use the controller struct itself as bus sysdata. */
static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	return dev;
}
883
/* No INTx fixup required on non-ARM builds; intentional stub. */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
}
887#endif
888
/*
 * Write @value to the register at @base + @offset, then issue a write
 * barrier so the access is posted to hardware before later operations.
 */
static inline void msm_pcie_write_reg(void *base, u32 offset, u32 value)
{
	writel_relaxed(value, base + offset);
	/* ensure that changes propagated to the hardware */
	wmb();
}
895
896static inline void msm_pcie_write_reg_field(void *base, u32 offset,
897 const u32 mask, u32 val)
898{
899 u32 shift = find_first_bit((void *)&mask, 32);
900 u32 tmp = readl_relaxed(base + offset);
901
902 tmp &= ~mask; /* clear written bits */
903 val = tmp | (val << shift);
904 writel_relaxed(val, base + offset);
905 /* ensure that changes propagated to the hardware */
906 wmb();
907}
908
909static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
910 struct msm_pcie_clk_info_t *info)
911{
912 int ret;
913
914 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
915 if (ret)
916 PCIE_ERR(dev,
917 "PCIe: RC%d can't configure core memory for clk %s: %d.\n",
918 dev->rc_idx, info->name, ret);
919 else
920 PCIE_DBG2(dev,
921 "PCIe: RC%d configured core memory for clk %s.\n",
922 dev->rc_idx, info->name);
923
924 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
925 if (ret)
926 PCIE_ERR(dev,
927 "PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
928 dev->rc_idx, info->name, ret);
929 else
930 PCIE_DBG2(dev,
931 "PCIe: RC%d configured peripheral memory for clk %s.\n",
932 dev->rc_idx, info->name);
933}
934
935#if defined(CONFIG_ARCH_FSM9010)
936#define PCIE20_PARF_PHY_STTS 0x3c
937#define PCIE2_PHY_RESET_CTRL 0x44
938#define PCIE20_PARF_PHY_REFCLK_CTRL2 0xa0
939#define PCIE20_PARF_PHY_REFCLK_CTRL3 0xa4
940#define PCIE20_PARF_PCS_SWING_CTRL1 0x88
941#define PCIE20_PARF_PCS_SWING_CTRL2 0x8c
942#define PCIE20_PARF_PCS_DEEMPH1 0x74
943#define PCIE20_PARF_PCS_DEEMPH2 0x78
944#define PCIE20_PARF_PCS_DEEMPH3 0x7c
945#define PCIE20_PARF_CONFIGBITS 0x84
946#define PCIE20_PARF_PHY_CTRL3 0x94
947#define PCIE20_PARF_PCS_CTRL 0x80
948
949#define TX_AMP_VAL 127
950#define PHY_RX0_EQ_GEN1_VAL 0
951#define PHY_RX0_EQ_GEN2_VAL 4
952#define TX_DEEMPH_GEN1_VAL 24
953#define TX_DEEMPH_GEN2_3_5DB_VAL 24
954#define TX_DEEMPH_GEN2_6DB_VAL 34
955#define PHY_TX0_TERM_OFFST_VAL 0
956
/* No PHY register dump is implemented for FSM9010; intentional stub. */
static inline void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
}
960
961static inline void pcie20_phy_reset(struct msm_pcie_dev_t *dev, uint32_t assert)
962{
963 msm_pcie_write_reg_field(dev->phy, PCIE2_PHY_RESET_CTRL,
964 BIT(0), (assert) ? 1 : 0);
965}
966
/*
 * One-time bring-up sequence for the FSM9010 28LP SNS PCIe PHY (100MHz
 * refclk): hold the PHY in software reset, program SSP enable, Tx swing,
 * de-emphasis, Rx equalization, Tx termination offset and refclk source,
 * then release the reset.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d: Initializing 28LP SNS phy - 100MHz\n",
		dev->rc_idx);

	/* Assert Phy SW Reset (argument 1 asserts; the original comment
	 * here incorrectly said "De-assert" — the release happens at the
	 * end of this function).
	 */
	pcie20_phy_reset(dev, 1);

	/* Program SSP ENABLE */
	if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL2) & BIT(0))
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2,
			BIT(0), 0);
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL3) &
		BIT(0)) == 0)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL3,
			BIT(0), 1);
	/* Program Tx Amplitude (write only when the field differs) */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL1) &
		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_AMP_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL1,
			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_AMP_VAL);
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL2) &
		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_AMP_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL2,
			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_AMP_VAL);
	/* Program De-Emphasis for Gen2 -6dB, Gen2 -3.5dB and Gen1 */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH1) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_DEEMPH_GEN2_6DB_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH1,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN2_6DB_VAL);

	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH2) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_DEEMPH_GEN2_3_5DB_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH2,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN2_3_5DB_VAL);

	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH3) &
		(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		TX_DEEMPH_GEN1_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH3,
			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			TX_DEEMPH_GEN1_VAL);

	/* Program Rx_Eq */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_CONFIGBITS) &
		(BIT(2)|BIT(1)|BIT(0))) != PHY_RX0_EQ_GEN1_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_CONFIGBITS,
			BIT(2)|BIT(1)|BIT(0), PHY_RX0_EQ_GEN1_VAL);

	/* Program Tx0_term_offset */
	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_CTRL3) &
		(BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
		PHY_TX0_TERM_OFFST_VAL)
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_CTRL3,
			BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
			PHY_TX0_TERM_OFFST_VAL);

	/* Program REF_CLK source: BIT(1) set selects the external refclk */
	msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2, BIT(1),
		(dev->ext_ref_clk) ? 1 : 0);
	/* disable Tx2Rx Loopback */
	if (readl_relaxed(dev->phy + PCIE20_PARF_PCS_CTRL) & BIT(1))
		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_CTRL,
			BIT(1), 0);
	/* De-assert Phy SW Reset */
	pcie20_phy_reset(dev, 0);
}
1042
1043static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1044{
1045
1046 /* read PCIE20_PARF_PHY_STTS twice */
1047 readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS);
1048 if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS) & BIT(0))
1049 return false;
1050 else
1051 return true;
1052}
1053#else
/*
 * Route the given selector values onto the four PHY test-control registers,
 * then dump the written selectors and the four resulting debug-bus status
 * registers to the IPC log.
 */
static void pcie_phy_dump_test_cntrl(struct msm_pcie_dev_t *dev,
					u32 cntrl4_val, u32 cntrl5_val,
					u32 cntrl6_val, u32 cntrl7_val)
{
	/* select which internal signals appear on the debug bus */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL4(dev->rc_idx, dev->common_phy), cntrl4_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL5(dev->rc_idx, dev->common_phy), cntrl5_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL6(dev->rc_idx, dev->common_phy), cntrl6_val);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TEST_CONTROL7(dev->rc_idx, dev->common_phy), cntrl7_val);

	/* log the selector read-back followed by the captured status words */
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL4: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL4(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL5: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL5(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL6: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL6(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_TEST_CONTROL7: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_TEST_CONTROL7(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_0_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_1_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_2_STATUS(dev->rc_idx,
				dev->common_phy)));
	PCIE_DUMP(dev,
		"PCIe: RC%d PCIE_N_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rc_idx,
		readl_relaxed(dev->phy +
			PCIE_N_DEBUG_BUS_3_STATUS(dev->rc_idx,
				dev->common_phy)));
}
1108
/*
 * Dump the QMP PHY state for debugging: walks the PCS, Tx, misc and
 * common-block debug buses through their selector ranges, logging each
 * captured status word, then hex-dumps the entire PHY register region in
 * 32-byte rows.  Not supported on PHY versions >= 0x20.
 */
static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;
	u32 write_val;

	if (dev->phy_ver >= 0x20) {
		PCIE_DUMP(dev, "PCIe: RC%d PHY dump is not supported\n",
			dev->rc_idx);
		return;
	}

	PCIE_DUMP(dev, "PCIe: RC%d PHY testbus\n", dev->rc_idx);

	/* walk the PCS test-control selectors */
	pcie_phy_dump_test_cntrl(dev, 0x18, 0x19, 0x1A, 0x1B);
	pcie_phy_dump_test_cntrl(dev, 0x1C, 0x1D, 0x1E, 0x1F);
	pcie_phy_dump_test_cntrl(dev, 0x20, 0x21, 0x22, 0x23);

	/* cycle the Tx-lane debug-bus selector through values 1..3 */
	for (i = 0; i < 3; i++) {
		write_val = 0x1 + i;
		msm_pcie_write_reg(dev->phy,
			QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
				dev->common_phy), write_val);
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_TX_N_DEBUG_BUS_SEL: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
					dev->common_phy)));

		pcie_phy_dump_test_cntrl(dev, 0x30, 0x31, 0x32, 0x33);
	}

	/* restore the test-control selectors to zero */
	pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);

	/* extra misc debug-bus walk only exists on PHY versions 0x10..0x1F */
	if (dev->phy_ver >= 0x10 && dev->phy_ver < 0x20) {
		pcie_phy_dump_test_cntrl(dev, 0x01, 0x02, 0x03, 0x0A);
		pcie_phy_dump_test_cntrl(dev, 0x0E, 0x0F, 0x12, 0x13);
		pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);

		/* program four consecutive byte-index selectors per pass */
		for (i = 0; i < 8; i += 4) {
			write_val = 0x1 + i;
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(dev->rc_idx,
					dev->common_phy), write_val);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 1);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 2);
			msm_pcie_write_reg(dev->phy,
				PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(dev->rc_idx,
					dev->common_phy), write_val + 3);

			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_0_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_0_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_1_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_1_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_2_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_2_STATUS(
						dev->rc_idx, dev->common_phy)));
			PCIE_DUMP(dev,
				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_3_STATUS: 0x%x\n",
				dev->rc_idx,
				readl_relaxed(dev->phy +
					PCIE_MISC_N_DEBUG_BUS_3_STATUS(
						dev->rc_idx, dev->common_phy)));
		}

		/* reset the misc byte-index selectors */
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
				dev->rc_idx, dev->common_phy), 0);
		msm_pcie_write_reg(dev->phy,
			PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
				dev->rc_idx, dev->common_phy), 0);
	}

	/* cycle the common-block (QSERDES COM) debug-bus selector: 2..3 */
	for (i = 0; i < 2; i++) {
		write_val = 0x2 + i;

		msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL,
			write_val);

		PCIE_DUMP(dev,
			"PCIe: RC%d to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS0: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS1: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS2: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2));
		PCIE_DUMP(dev,
			"PCIe: RC%d QSERDES_COM_DEBUG_BUS3: 0x%x\n\n",
			dev->rc_idx,
			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3));
	}

	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0);

	/* dump the shared PCS common block, if this target has one */
	if (dev->common_phy) {
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
			0x01);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE1_INDEX,
			0x02);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE2_INDEX,
			0x03);
		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE3_INDEX,
			0x04);

		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE1_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE2_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE3_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_0_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_1_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_1_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_2_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_2_STATUS));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_3_STATUS: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_3_STATUS));

		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
			0x05);

		PCIE_DUMP(dev,
			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
		PCIE_DUMP(dev,
			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n\n",
			dev->rc_idx,
			readl_relaxed(dev->phy +
				PCIE_COM_DEBUG_BUS_0_STATUS));
	}

	/* raw hex dump of the full PHY register space, 8 words per line */
	size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->phy + i),
			readl_relaxed(dev->phy + (i + 4)),
			readl_relaxed(dev->phy + (i + 8)),
			readl_relaxed(dev->phy + (i + 12)),
			readl_relaxed(dev->phy + (i + 16)),
			readl_relaxed(dev->phy + (i + 20)),
			readl_relaxed(dev->phy + (i + 24)),
			readl_relaxed(dev->phy + (i + 28)));
	}
}
1338
1339#ifdef CONFIG_ARCH_MDMCALIFORNIUM
/*
 * Initialize the MDM 14nm QMP PCIe PHY (19.2MHz refclk, common-mode clock,
 * SSC enabled).
 *
 * The PCS block is held in software reset and powered up, then the QSERDES
 * common (PLL/clock) block, the Tx lane, the Rx lane, and the PCS timing/
 * de-emphasis registers are programmed in order.  Finally the reset is
 * released and the start-control register kicks off PHY bring-up.  The
 * register write order follows the hardware programming sequence and must
 * not be rearranged.
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	u8 common_phy;

	PCIE_DBG(dev,
		"RC%d: Initializing MDM 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
		dev->rc_idx);

	/* per-port register macros need to know if the PCS is shared */
	if (dev->common_phy)
		common_phy = 1;
	else
		common_phy = 0;

	/* hold the PCS in SW reset and power up the PHY block */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x03);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy), 0x06);

	/* QSERDES common block: PLL lock, VCO tuning and clocking setup */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);

	/* pick the sysclk enable source from the TCSR PHY clock scheme */
	if (dev->tcsr) {
		PCIE_DBG(dev, "RC%d: TCSR PHY clock scheme is 0x%x\n",
			dev->rc_idx, readl_relaxed(dev->tcsr));

		if (readl_relaxed(dev->tcsr) & (BIT(1) | BIT(0)))
			msm_pcie_write_reg(dev->phy,
				QSERDES_COM_SYSCLK_EN_SEL, 0x0A);
		else
			msm_pcie_write_reg(dev->phy,
				QSERDES_COM_SYSCLK_EN_SEL, 0x04);
	}

	/* PLL frequency configuration (decimal/fractional dividers) */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x0D);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x04);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);

	/* spread-spectrum clocking (SSC) parameters */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);

	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
		common_phy), 0x45);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);

	/* Tx lane tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_RES_CODE_LANE_OFFSET(dev->rc_idx, common_phy),
		0x02);
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_RCV_DETECT_LVL_2(dev->rc_idx, common_phy),
		0x12);

	/* Rx lane: signal detect, equalization and CDR tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
		0x1C);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
		0x14);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
		0xDB);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
		common_phy),
		0x4B);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
		0x04);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);

	/* PCS: endpoint refclk drive, wakeup/reset delays, de-emphasis */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_OSC_DTCT_ACTIONS(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x40);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
		0x73);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
		0x99);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TXDEEMPH_M6DB_V0(dev->rc_idx, common_phy),
		0x15);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
		0x0E);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_SIGDET_CNTRL(dev->rc_idx, common_phy),
		0x07);

	/* release SW reset and start the PHY */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
		0x03);
}
1500
/*
 * No separate per-port PCS init on MDM: pcie_phy_init() above programs the
 * whole PHY, including the PCS, in one pass.
 */
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
}
1504
1505static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1506{
1507 if (readl_relaxed(dev->phy +
1508 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) & BIT(6))
1509 return false;
1510 else
1511 return true;
1512}
1513#else
/*
 * Initialize the 14nm QMP PCIe PHY common block (19.2MHz refclk,
 * common-mode clock, SSC enabled).
 *
 * If a device-tree supplied programming sequence exists
 * (dev->phy_sequence), it is replayed verbatim (with optional per-entry
 * delays) and the hard-coded defaults below are skipped.  Otherwise the
 * QSERDES common PLL/clock registers are programmed with the default
 * values, with PHY-version-0x3 overrides at the end.  The per-port PCS
 * setup is done separately in pcie_pcs_port_phy_init().
 */
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	int i;
	struct msm_pcie_phy_info_t *phy_seq;

	PCIE_DBG(dev,
		"RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
		dev->rc_idx);

	/* device-tree sequence overrides the hard-coded defaults */
	if (dev->phy_sequence) {
		i = dev->phy_len;
		phy_seq = dev->phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
		return;
	}

	if (dev->common_phy)
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0x01);

	/* QSERDES common block: clocking, PLL lock and VCO tuning */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1C);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x42);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x04);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
	/* spread-spectrum clocking (SSC) parameters */
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);

	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);

	/* PHY version 0x3 overrides */
	if (dev->phy_ver == 0x3) {
		msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
		msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x40);
	}

	/* for a shared PCS, release reset and start the common block now */
	if (dev->common_phy) {
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x00);
		msm_pcie_write_reg(dev->phy, PCIE_COM_START_CONTROL, 0x03);
	}
}
1595
/*
 * Initialize the per-port PCS and lane registers of the 14nm QMP PHY.
 *
 * Skipped entirely on PHY versions >= 0x20.  A device-tree supplied
 * sequence (dev->port_phy_sequence), when present, is replayed verbatim
 * instead of the hard-coded defaults.  Otherwise: Tx and Rx lane tuning,
 * PCS delay/power-state configuration, version-0x3 overrides, a power-up
 * settling delay, then SW reset release and start-control kick.
 */
static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
{
	int i;
	struct msm_pcie_phy_info_t *phy_seq;
	u8 common_phy;

	if (dev->phy_ver >= 0x20)
		return;

	PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);

	if (dev->common_phy)
		common_phy = 1;
	else
		common_phy = 0;

	/* device-tree sequence overrides the hard-coded defaults */
	if (dev->port_phy_sequence) {
		i = dev->port_phy_len;
		phy_seq = dev->port_phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
		return;
	}

	/* Tx lane tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
		common_phy), 0x45);
	msm_pcie_write_reg(dev->phy,
		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy),
		0x06);

	/* Rx lane: signal detect, equalization and CDR tuning */
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
		0x1C);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
		0x17);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
		0xDB);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_RX_BAND(dev->rc_idx, common_phy),
		0x18);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
		0x04);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
		0x04);
	/* PCS: idle detect, wakeup/reset delay and PLL-lock-check timing */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_RX_IDLE_DTCT_CNTRL(dev->rc_idx, common_phy),
		0x4C);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
		0x01);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
		0x05);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
		common_phy), 0x4B);
	msm_pcie_write_reg(dev->phy,
		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
		0x14);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
		0x05);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x02);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_STATE_CONFIG4(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_STATE_CONFIG1(dev->rc_idx, common_phy),
		0xA3);

	/* PHY version 0x3 overrides */
	if (dev->phy_ver == 0x3) {
		msm_pcie_write_reg(dev->phy,
			QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
			0x19);

		msm_pcie_write_reg(dev->phy,
			PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
			0x0E);
	}

	/* power up the port PHY and let it settle before releasing reset */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
		0x03);
	usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
		0x00);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
		0x0A);
}
1712
1713static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
1714{
1715 if (dev->phy_ver >= 0x20) {
1716 if (readl_relaxed(dev->phy +
1717 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) &
1718 BIT(6))
1719 return false;
1720 else
1721 return true;
1722 }
1723
1724 if (!(readl_relaxed(dev->phy + PCIE_COM_PCS_READY_STATUS) & 0x1))
1725 return false;
1726 else
1727 return true;
1728}
1729#endif
1730#endif
1731
1732static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
1733{
1734 int ret, scm_ret;
1735
1736 if (!dev) {
1737 pr_err("PCIe: the input pcie dev is NULL.\n");
1738 return -ENODEV;
1739 }
1740
1741 ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
1742 if (ret || scm_ret) {
1743 PCIE_ERR(dev,
1744 "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
1745 dev->rc_idx, ret, scm_ret);
1746 return ret ? ret : -EINVAL;
1747 }
1748
1749 return 0;
1750}
1751
1752static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
1753 u32 offset)
1754{
1755 if (offset % 4) {
1756 PCIE_ERR(dev,
1757 "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
1758 dev->rc_idx, offset);
1759 return MSM_PCIE_ERROR;
1760 }
1761
1762 return 0;
1763}
1764
/*
 * Confirm that the PCIe link of this root complex is genuinely up.
 *
 * @check_sw_stts: also require the driver's software link state to be
 *                 MSM_PCIE_LINK_ENABLED.
 * @check_ep:      also read the endpoint's config space header.
 * @ep_conf:       mapped endpoint config space (used only when check_ep).
 *
 * Checks, in order: the software link state, the hardware link-up bit
 * (BIT(29) at dm_core + 0x80), and that the RC (and optionally EP)
 * vendor/device ID register does not read back as PCIE_LINK_DOWN (the
 * all-ones pattern seen when config reads fail).  Returns true only when
 * every requested check passes.
 */
static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
						bool check_sw_stts,
						bool check_ep,
						void __iomem *ep_conf)
{
	u32 val;

	if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
			dev->rc_idx);
		return false;
	}

	/* hardware link-up indication */
	if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
			dev->rc_idx);
		return false;
	}

	/* offset 0 of config space: vendor/device ID */
	val = readl_relaxed(dev->dm_core);
	PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
		dev->rc_idx, val);
	if (val == PCIE_LINK_DOWN) {
		PCIE_ERR(dev,
			"PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
			dev->rc_idx, dev->rc_idx, val);
		return false;
	}

	if (check_ep) {
		val = readl_relaxed(ep_conf);
		PCIE_DBG(dev,
			"PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
			dev->rc_idx, val);
		if (val == PCIE_LINK_DOWN) {
			PCIE_ERR(dev,
				"PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
				dev->rc_idx, dev->rc_idx, val);
			return false;
		}
	}

	return true;
}
1809
/*
 * Restore shadowed config space after a link recovery.
 *
 * @rc: true restores the root complex (dm_core / rc_shadow) only;
 *      false walks the endpoint table and restores each EP's shadow.
 *
 * Only dwords whose shadow value differs from the PCIE_CLEAR sentinel
 * (i.e. were actually saved) are written back, in descending dword order.
 * EPs whose link is not confirmed up are skipped; the RC path exits after
 * a single iteration.  After restoring an EP, its state is re-saved with
 * pci_save_state() and cfg advances by SZ_4K to the next function's
 * config space.
 */
static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
{
	int i, j;
	u32 val = 0;
	u32 *shadow;
	void *cfg = dev->conf;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		/* a zero bdf marks the end of the populated EP table */
		if (!rc && !dev->pcidev_table[i].bdf)
			break;
		if (rc) {
			cfg = dev->dm_core;
			shadow = dev->rc_shadow;
		} else {
			/* skip endpoints whose link is not confirmed up */
			if (!msm_pcie_confirm_linkup(dev, false, true,
				dev->pcidev_table[i].conf_base))
				continue;

			shadow = dev->ep_shadow[i];
			PCIE_DBG(dev,
				"PCIe Device: %02x:%02x.%01x\n",
				dev->pcidev_table[i].bdf >> 24,
				dev->pcidev_table[i].bdf >> 19 & 0x1f,
				dev->pcidev_table[i].bdf >> 16 & 0x07);
		}
		/* write back saved dwords, highest offset first */
		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
			val = shadow[j];
			/* PCIE_CLEAR marks an unsaved slot */
			if (val != PCIE_CLEAR) {
				PCIE_DBG3(dev,
					"PCIe: before recovery:cfg 0x%x:0x%x\n",
					j * 4, readl_relaxed(cfg + j * 4));
				PCIE_DBG3(dev,
					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
					j, j * 4, val);
				writel_relaxed(val, cfg + j * 4);
				/* ensure changes propagated to the hardware */
				wmb();
				PCIE_DBG3(dev,
					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
					j * 4, readl_relaxed(cfg + j * 4));
			}
		}
		/* RC path restores a single config space */
		if (rc)
			break;

		pci_save_state(dev->pcidev_table[i].dev);
		cfg += SZ_4K;
	}
}
1859
1860static void msm_pcie_write_mask(void __iomem *addr,
1861 uint32_t clear_mask, uint32_t set_mask)
1862{
1863 uint32_t val;
1864
1865 val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
1866 writel_relaxed(val, addr);
1867 wmb(); /* ensure data is written to hardware register */
1868}
1869
/*
 * Dump the PARF block for debugging: sweeps the testbus selector (bits
 * [23:16] of PARF_SYS_CTRL) through 0x01..0x1A, logging the selector and
 * PARF_TEST_BUS capture at each step, restores the original SYS_CTRL
 * value, then hex-dumps the whole PARF register region in 32-byte rows.
 */
static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;
	u32 original;

	PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);

	/* save SYS_CTRL so the selector sweep can be undone */
	original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
	for (i = 1; i <= 0x1A; i++) {
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
				0xFF0000, i << 16);
		PCIE_DUMP(dev,
			"RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
			readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
	}
	writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);

	PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);

	/* raw hex dump of the full PARF register space, 8 words per line */
	size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->parf + i),
			readl_relaxed(dev->parf + (i + 4)),
			readl_relaxed(dev->parf + (i + 8)),
			readl_relaxed(dev->parf + (i + 12)),
			readl_relaxed(dev->parf + (i + 16)),
			readl_relaxed(dev->parf + (i + 20)),
			readl_relaxed(dev->parf + (i + 24)),
			readl_relaxed(dev->parf + (i + 28)));
	}
}
1906
1907static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
1908{
1909 PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
1910 dev->rc_idx, dev->enumerated ? "" : "not");
1911 PCIE_DBG_FS(dev, "PCIe: link is %s\n",
1912 (dev->link_status == MSM_PCIE_LINK_ENABLED)
1913 ? "enabled" : "disabled");
1914 PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
1915 dev->cfg_access ? "" : "not");
1916 PCIE_DBG_FS(dev, "use_msi is %d\n",
1917 dev->use_msi);
1918 PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
1919 dev->use_pinctrl);
1920 PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
1921 dev->use_19p2mhz_aux_clk);
1922 PCIE_DBG_FS(dev, "user_suspend is %d\n",
1923 dev->user_suspend);
1924 PCIE_DBG_FS(dev, "num_ep: %d\n",
1925 dev->num_ep);
1926 PCIE_DBG_FS(dev, "num_active_ep: %d\n",
1927 dev->num_active_ep);
1928 PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
1929 dev->pending_ep_reg ? "true" : "false");
1930 PCIE_DBG_FS(dev, "phy_len is %d",
1931 dev->phy_len);
1932 PCIE_DBG_FS(dev, "port_phy_len is %d",
1933 dev->port_phy_len);
1934 PCIE_DBG_FS(dev, "disable_pc is %d",
1935 dev->disable_pc);
1936 PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
1937 dev->l0s_supported ? "" : "not");
1938 PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
1939 dev->l1_supported ? "" : "not");
1940 PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
1941 dev->l1ss_supported ? "" : "not");
1942 PCIE_DBG_FS(dev, "common_clk_en is %d\n",
1943 dev->common_clk_en);
1944 PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
1945 dev->clk_power_manage_en);
1946 PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
1947 dev->aux_clk_sync);
1948 PCIE_DBG_FS(dev, "AER is %s enable\n",
1949 dev->aer_enable ? "" : "not");
1950 PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
1951 dev->ext_ref_clk);
1952 PCIE_DBG_FS(dev, "ep_wakeirq is %d\n",
1953 dev->ep_wakeirq);
1954 PCIE_DBG_FS(dev, "phy_ver is %d\n",
1955 dev->phy_ver);
1956 PCIE_DBG_FS(dev, "drv_ready is %d\n",
1957 dev->drv_ready);
1958 PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
1959 dev->linkdown_panic);
1960 PCIE_DBG_FS(dev, "the link is %s suspending\n",
1961 dev->suspending ? "" : "not");
1962 PCIE_DBG_FS(dev, "shadow is %s enabled\n",
1963 dev->shadow_en ? "" : "not");
1964 PCIE_DBG_FS(dev, "the power of RC is %s on\n",
1965 dev->power_on ? "" : "not");
1966 PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
1967 dev->msi_gicm_addr);
1968 PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
1969 dev->msi_gicm_base);
1970 PCIE_DBG_FS(dev, "bus_client: %d\n",
1971 dev->bus_client);
1972 PCIE_DBG_FS(dev, "current short bdf: %d\n",
1973 dev->current_short_bdf);
1974 PCIE_DBG_FS(dev, "smmu does %s exist\n",
1975 dev->smmu_exist ? "" : "not");
1976 PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
1977 dev->smmu_sid_base);
1978 PCIE_DBG_FS(dev, "n_fts: %d\n",
1979 dev->n_fts);
1980 PCIE_DBG_FS(dev, "common_phy: %d\n",
1981 dev->common_phy);
1982 PCIE_DBG_FS(dev, "ep_latency: %dms\n",
1983 dev->ep_latency);
1984 PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
1985 dev->wr_halt_size);
1986 PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
1987 dev->cpl_timeout);
1988 PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
1989 dev->current_bdf);
1990 PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
1991 dev->perst_delay_us_min);
1992 PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
1993 dev->perst_delay_us_max);
1994 PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
1995 dev->tlp_rd_size);
1996 PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
1997 dev->rc_corr_counter);
1998 PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
1999 dev->rc_non_fatal_counter);
2000 PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
2001 dev->rc_fatal_counter);
2002 PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
2003 dev->ep_corr_counter);
2004 PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
2005 dev->ep_non_fatal_counter);
2006 PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
2007 dev->ep_fatal_counter);
2008 PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
2009 dev->linkdown_counter);
2010 PCIE_DBG_FS(dev, "wake_counter: %lu\n",
2011 dev->wake_counter);
2012 PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
2013 dev->link_turned_on_counter);
2014 PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
2015 dev->link_turned_off_counter);
2016}
2017
2018static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
2019{
2020 int i, j;
2021 u32 val = 0;
2022 u32 *shadow;
2023
2024 for (i = 0; i < MAX_DEVICE_NUM; i++) {
2025 if (!rc && !dev->pcidev_table[i].bdf)
2026 break;
2027 if (rc) {
2028 shadow = dev->rc_shadow;
2029 } else {
2030 shadow = dev->ep_shadow[i];
2031 PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
2032 dev->pcidev_table[i].bdf >> 24,
2033 dev->pcidev_table[i].bdf >> 19 & 0x1f,
2034 dev->pcidev_table[i].bdf >> 16 & 0x07);
2035 }
2036 for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
2037 val = shadow[j];
2038 if (val != PCIE_CLEAR) {
2039 PCIE_DBG_FS(dev,
2040 "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
2041 j, j * 4, val);
2042 }
2043 }
2044 if (rc)
2045 break;
2046 }
2047}
2048
/*
 * Run one debug test case against a root complex, selected via debugfs.
 *
 * Test cases:
 *   0  - print driver status           7/8  - disable/enable ASPM L1
 *   1  - disable the link              9/10 - disable/enable L1 substates
 *   2  - enable link, recover config   11   - enumerate the RC
 *   3  - bounce link, recover config   12   - masked write to a register
 *   4  - dump RC/EP shadow registers   13   - dump all registers of base_sel
 *   5/6 - disable/enable ASPM L0s
 *
 * Cases 12/13 consume the file-scope base_sel/wr_offset/wr_mask/wr_value
 * knobs set through msm_pcie_debug_info() or the debugfs nodes.
 *
 * NOTE(review): cases 5-10 read EP config space (dev->conf); they assume
 * the link is up and config access is allowed — confirm before invoking.
 */
static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
					u32 testcase)
{
	int ret, i;
	u32 base_sel_size = 0;
	u32 val = 0;
	u32 current_offset = 0;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	/*
	 * Cases 5-10 poke the EP's PCIe capability registers: walk the
	 * EP's standard capability list to locate the PCI Express
	 * capability, then derive the Link Control/Status (+0x10) and
	 * Device Control 2/Status 2 (+0x28) register offsets.
	 */
	if (testcase >= 5 && testcase <= 10) {
		current_offset =
			readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xff) == PCIE20_CAP_ID) {
				ep_link_ctrlstts_offset = current_offset +
								0x10;
				ep_dev_ctrl2stts2_offset = current_offset +
								0x28;
				break;
			}
			current_offset = (val >> 8) & 0xff;
		}

		if (!ep_link_ctrlstts_offset)
			PCIE_DBG(dev,
				"RC%d endpoint does not support PCIe capability registers\n",
				dev->rc_idx);
		else
			PCIE_DBG(dev,
				"RC%d: ep_link_ctrlstts_offset: 0x%x\n",
				dev->rc_idx, ep_link_ctrlstts_offset);
	}

	switch (testcase) {
	case 0: /* output status */
		PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
			dev->rc_idx);
		msm_pcie_show_status(dev);
		break;
	case 1: /* disable link */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
				__func__);
		break;
	case 2: /* enable link and recover config space for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: enable link and recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 3: /*
		 * disable and enable link, recover config space for
		 * RC and EP
		 */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 4: /* dump shadow registers for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping RC shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, true);

		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping EP shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, false);
		break;
	case 5: /* disable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
			dev->rc_idx);
		/* Clear ASPM L0s enable (bit 0) on both RC and EP. */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(0), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(0), 0);
		/* Keep the suspend/resume shadow in sync with hardware. */
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 6: /* enable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
			dev->rc_idx);
		/* Set ASPM L0s enable (bit 0) on both RC and EP. */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(0));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 7: /* disable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
			dev->rc_idx);
		/* Clear ASPM L1 enable (bit 1) on both RC and EP. */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(1), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(1), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 8: /* enable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
			dev->rc_idx);
		/* Set ASPM L1 enable (bit 1) on both RC and EP. */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(1));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 9: /* disable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
			dev->rc_idx);
		/*
		 * Walk the EP's extended capability list for the L1
		 * substates capability; CTRL1 lives at +0x8 within it.
		 */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
				dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* Clear the four L1ss enables and LTR (bit 10) on RC+EP. */
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_L1SUB_CONTROL1,
					0xf, 0);
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					BIT(10), 0);
		msm_pcie_write_reg_field(dev->conf,
					ep_l1sub_ctrl1_offset,
					0xf, 0);
		msm_pcie_write_mask(dev->conf +
					ep_dev_ctrl2stts2_offset,
					BIT(10), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 10: /* enable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
			dev->rc_idx);
		/*
		 * Walk the extended capability list; CAP_REG1 (+0x4)
		 * tells which of the four L1ss modes the EP supports.
		 */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset =
						current_offset + 0x4;
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		val = readl_relaxed(dev->conf +
				ep_l1sub_cap_reg1_offset);

		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
			val);
		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* Enable only the modes the EP advertises support for. */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_L1SUB_CONTROL1,
					0xf, val);
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0, BIT(10));
		msm_pcie_write_reg_field(dev->conf,
					ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf +
					ep_dev_ctrl2stts2_offset,
					0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 11: /* enumerate PCIe */
		PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
			dev->rc_idx);
		if (dev->enumerated)
			PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
				dev->rc_idx);
		else {
			if (!msm_pcie_enumerate(dev->rc_idx))
				PCIE_DBG_FS(dev,
					"PCIe: RC%d is successfully enumerated\n",
					dev->rc_idx);
			else
				PCIE_DBG_FS(dev,
					"PCIe: RC%d enumeration failed\n",
					dev->rc_idx);
		}
		break;
	case 12: /* write a value to a register */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: writing a value to a register\n\n",
			dev->rc_idx);

		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		}

		PCIE_DBG_FS(dev,
			"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
			dev->res[base_sel - 1].name,
			dev->res[base_sel - 1].base,
			wr_offset, wr_mask, wr_value);

		base_sel_size = resource_size(dev->res[base_sel - 1].resource);

		/* Reject writes past the resource end or misaligned. */
		if (wr_offset >  base_sel_size - 4 ||
			msm_pcie_check_align(dev, wr_offset))
			PCIE_DBG_FS(dev,
				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
				dev->rc_idx, wr_offset, base_sel_size - 4);
		else
			msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
				wr_offset, wr_mask, wr_value);

		break;
	case 13: /* dump all registers of base_sel */
		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
			pcie_parf_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
			pcie_phy_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
			/* Only dump the first 4K of config space. */
			base_sel_size = 0x1000;
		} else {
			base_sel_size = resource_size(
				dev->res[base_sel - 1].resource);
		}

		PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
			dev->res[base_sel - 1].name, dev->rc_idx);

		/* Eight dwords (32 bytes) per output line. */
		for (i = 0; i < base_sel_size; i += 32) {
			PCIE_DBG_FS(dev,
			"0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			i, readl_relaxed(dev->res[base_sel - 1].base + i),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
		}
		break;
	default:
		PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
		break;
	}
}
2470
2471int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
2472 u32 offset, u32 mask, u32 value)
2473{
2474 int ret = 0;
2475 struct msm_pcie_dev_t *pdev = NULL;
2476
2477 if (!dev) {
2478 pr_err("PCIe: the input pci dev is NULL.\n");
2479 return -ENODEV;
2480 }
2481
2482 if (option == 12 || option == 13) {
2483 if (!base || base > 5) {
2484 PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
2485 PCIE_DBG_FS(pdev,
2486 "PCIe: base_sel is still 0x%x\n", base_sel);
2487 return -EINVAL;
2488 }
2489
2490 base_sel = base;
2491 PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
2492
2493 if (option == 12) {
2494 wr_offset = offset;
2495 wr_mask = mask;
2496 wr_value = value;
2497
2498 PCIE_DBG_FS(pdev,
2499 "PCIe: wr_offset is now 0x%x\n", wr_offset);
2500 PCIE_DBG_FS(pdev,
2501 "PCIe: wr_mask is now 0x%x\n", wr_mask);
2502 PCIE_DBG_FS(pdev,
2503 "PCIe: wr_value is now 0x%x\n", wr_value);
2504 }
2505 }
2506
2507 pdev = PCIE_BUS_PRIV_DATA(dev->bus);
2508 rc_sel = 1 << pdev->rc_idx;
2509
2510 msm_pcie_sel_debug_testcase(pdev, option);
2511
2512 return ret;
2513}
2514EXPORT_SYMBOL(msm_pcie_debug_info);
2515
Tony Truongbd9a3412017-02-27 18:30:13 -08002516#ifdef CONFIG_SYSFS
2517static ssize_t msm_pcie_enumerate_store(struct device *dev,
2518 struct device_attribute *attr,
2519 const char *buf, size_t count)
2520{
2521 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
2522 dev_get_drvdata(dev);
2523
2524 if (pcie_dev)
2525 msm_pcie_enumerate(pcie_dev->rc_idx);
2526
2527 return count;
2528}
2529
2530static DEVICE_ATTR(enumerate, 0200, NULL, msm_pcie_enumerate_store);
2531
2532static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
2533{
2534 int ret;
2535
2536 ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
2537 if (ret)
2538 PCIE_DBG_FS(dev,
2539 "RC%d: failed to create sysfs enumerate node\n",
2540 dev->rc_idx);
2541}
2542
2543static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
2544{
2545 if (dev->pdev)
2546 device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
2547}
2548#else
/* CONFIG_SYSFS disabled: no-op stubs so callers need no #ifdef guards. */
static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
{
}

static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
{
}
2556#endif
2557
Tony Truong349ee492014-10-01 17:35:56 -07002558#ifdef CONFIG_DEBUG_FS
/* debugfs directory ("pci-msm") and its control-file dentries. */
static struct dentry *dent_msm_pcie;
static struct dentry *dfile_rc_sel;
static struct dentry *dfile_case;
static struct dentry *dfile_base_sel;
static struct dentry *dfile_linkdown_panic;
static struct dentry *dfile_wr_offset;
static struct dentry *dfile_wr_mask;
static struct dentry *dfile_wr_value;
static struct dentry *dfile_ep_wakeirq;
static struct dentry *dfile_aer_enable;
static struct dentry *dfile_corr_counter_limit;

/* Highest legal rc_sel bitmask; set to (1 << MAX_RC_NUM) - 1 at init. */
static u32 rc_sel_max;
2572
2573static ssize_t msm_pcie_cmd_debug(struct file *file,
2574 const char __user *buf,
2575 size_t count, loff_t *ppos)
2576{
2577 unsigned long ret;
2578 char str[MAX_MSG_LEN];
2579 unsigned int testcase = 0;
2580 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002581 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002582
Tony Truongfdbd5672017-01-06 16:23:14 -08002583 memset(str, 0, size);
2584 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002585 if (ret)
2586 return -EFAULT;
2587
Tony Truongfdbd5672017-01-06 16:23:14 -08002588 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002589 testcase = (testcase * 10) + (str[i] - '0');
2590
2591 if (!rc_sel)
2592 rc_sel = 1;
2593
2594 pr_alert("PCIe: TEST: %d\n", testcase);
2595
2596 for (i = 0; i < MAX_RC_NUM; i++) {
2597 if (!((rc_sel >> i) & 0x1))
2598 continue;
2599 msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
2600 }
2601
2602 return count;
2603}
2604
2605const struct file_operations msm_pcie_cmd_debug_ops = {
2606 .write = msm_pcie_cmd_debug,
2607};
2608
2609static ssize_t msm_pcie_set_rc_sel(struct file *file,
2610 const char __user *buf,
2611 size_t count, loff_t *ppos)
2612{
2613 unsigned long ret;
2614 char str[MAX_MSG_LEN];
2615 int i;
2616 u32 new_rc_sel = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002617 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002618
Tony Truongfdbd5672017-01-06 16:23:14 -08002619 memset(str, 0, size);
2620 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002621 if (ret)
2622 return -EFAULT;
2623
Tony Truongfdbd5672017-01-06 16:23:14 -08002624 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002625 new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
2626
2627 if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
2628 pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
2629 pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
2630 } else {
2631 rc_sel = new_rc_sel;
2632 pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
2633 }
2634
2635 pr_alert("PCIe: the following RC(s) will be tested:\n");
2636 for (i = 0; i < MAX_RC_NUM; i++) {
2637 if (!rc_sel) {
2638 pr_alert("RC %d\n", i);
2639 break;
2640 } else if (rc_sel & (1 << i)) {
2641 pr_alert("RC %d\n", i);
2642 }
2643 }
2644
2645 return count;
2646}
2647
2648const struct file_operations msm_pcie_rc_sel_ops = {
2649 .write = msm_pcie_set_rc_sel,
2650};
2651
2652static ssize_t msm_pcie_set_base_sel(struct file *file,
2653 const char __user *buf,
2654 size_t count, loff_t *ppos)
2655{
2656 unsigned long ret;
2657 char str[MAX_MSG_LEN];
2658 int i;
2659 u32 new_base_sel = 0;
2660 char *base_sel_name;
Tony Truongfdbd5672017-01-06 16:23:14 -08002661 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002662
Tony Truongfdbd5672017-01-06 16:23:14 -08002663 memset(str, 0, size);
2664 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002665 if (ret)
2666 return -EFAULT;
2667
Tony Truongfdbd5672017-01-06 16:23:14 -08002668 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002669 new_base_sel = (new_base_sel * 10) + (str[i] - '0');
2670
2671 if (!new_base_sel || new_base_sel > 5) {
2672 pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
2673 new_base_sel);
2674 pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
2675 } else {
2676 base_sel = new_base_sel;
2677 pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
2678 }
2679
2680 switch (base_sel) {
2681 case 1:
2682 base_sel_name = "PARF";
2683 break;
2684 case 2:
2685 base_sel_name = "PHY";
2686 break;
2687 case 3:
2688 base_sel_name = "RC CONFIG SPACE";
2689 break;
2690 case 4:
2691 base_sel_name = "ELBI";
2692 break;
2693 case 5:
2694 base_sel_name = "EP CONFIG SPACE";
2695 break;
2696 default:
2697 base_sel_name = "INVALID";
2698 break;
2699 }
2700
2701 pr_alert("%s\n", base_sel_name);
2702
2703 return count;
2704}
2705
2706const struct file_operations msm_pcie_base_sel_ops = {
2707 .write = msm_pcie_set_base_sel,
2708};
2709
2710static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
2711 const char __user *buf,
2712 size_t count, loff_t *ppos)
2713{
2714 unsigned long ret;
2715 char str[MAX_MSG_LEN];
2716 u32 new_linkdown_panic = 0;
2717 int i;
2718
2719 memset(str, 0, sizeof(str));
2720 ret = copy_from_user(str, buf, sizeof(str));
2721 if (ret)
2722 return -EFAULT;
2723
2724 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2725 new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
2726
2727 if (new_linkdown_panic <= 1) {
2728 for (i = 0; i < MAX_RC_NUM; i++) {
2729 if (!rc_sel) {
2730 msm_pcie_dev[0].linkdown_panic =
2731 new_linkdown_panic;
2732 PCIE_DBG_FS(&msm_pcie_dev[0],
2733 "PCIe: RC0: linkdown_panic is now %d\n",
2734 msm_pcie_dev[0].linkdown_panic);
2735 break;
2736 } else if (rc_sel & (1 << i)) {
2737 msm_pcie_dev[i].linkdown_panic =
2738 new_linkdown_panic;
2739 PCIE_DBG_FS(&msm_pcie_dev[i],
2740 "PCIe: RC%d: linkdown_panic is now %d\n",
2741 i, msm_pcie_dev[i].linkdown_panic);
2742 }
2743 }
2744 } else {
2745 pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
2746 new_linkdown_panic);
2747 }
2748
2749 return count;
2750}
2751
2752const struct file_operations msm_pcie_linkdown_panic_ops = {
2753 .write = msm_pcie_set_linkdown_panic,
2754};
2755
2756static ssize_t msm_pcie_set_wr_offset(struct file *file,
2757 const char __user *buf,
2758 size_t count, loff_t *ppos)
2759{
2760 unsigned long ret;
2761 char str[MAX_MSG_LEN];
2762 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002763 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002764
Tony Truongfdbd5672017-01-06 16:23:14 -08002765 memset(str, 0, size);
2766 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002767 if (ret)
2768 return -EFAULT;
2769
2770 wr_offset = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002771 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002772 wr_offset = (wr_offset * 10) + (str[i] - '0');
2773
2774 pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
2775
2776 return count;
2777}
2778
2779const struct file_operations msm_pcie_wr_offset_ops = {
2780 .write = msm_pcie_set_wr_offset,
2781};
2782
2783static ssize_t msm_pcie_set_wr_mask(struct file *file,
2784 const char __user *buf,
2785 size_t count, loff_t *ppos)
2786{
2787 unsigned long ret;
2788 char str[MAX_MSG_LEN];
2789 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002790 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002791
Tony Truongfdbd5672017-01-06 16:23:14 -08002792 memset(str, 0, size);
2793 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002794 if (ret)
2795 return -EFAULT;
2796
2797 wr_mask = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002798 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002799 wr_mask = (wr_mask * 10) + (str[i] - '0');
2800
2801 pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
2802
2803 return count;
2804}
2805
2806const struct file_operations msm_pcie_wr_mask_ops = {
2807 .write = msm_pcie_set_wr_mask,
2808};
2809static ssize_t msm_pcie_set_wr_value(struct file *file,
2810 const char __user *buf,
2811 size_t count, loff_t *ppos)
2812{
2813 unsigned long ret;
2814 char str[MAX_MSG_LEN];
2815 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002816 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002817
Tony Truongfdbd5672017-01-06 16:23:14 -08002818 memset(str, 0, size);
2819 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002820 if (ret)
2821 return -EFAULT;
2822
2823 wr_value = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002824 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002825 wr_value = (wr_value * 10) + (str[i] - '0');
2826
2827 pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
2828
2829 return count;
2830}
2831
2832const struct file_operations msm_pcie_wr_value_ops = {
2833 .write = msm_pcie_set_wr_value,
2834};
2835
2836static ssize_t msm_pcie_set_ep_wakeirq(struct file *file,
2837 const char __user *buf,
2838 size_t count, loff_t *ppos)
2839{
2840 unsigned long ret;
2841 char str[MAX_MSG_LEN];
2842 u32 new_ep_wakeirq = 0;
2843 int i;
2844
2845 memset(str, 0, sizeof(str));
2846 ret = copy_from_user(str, buf, sizeof(str));
2847 if (ret)
2848 return -EFAULT;
2849
2850 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2851 new_ep_wakeirq = (new_ep_wakeirq * 10) + (str[i] - '0');
2852
2853 if (new_ep_wakeirq <= 1) {
2854 for (i = 0; i < MAX_RC_NUM; i++) {
2855 if (!rc_sel) {
2856 msm_pcie_dev[0].ep_wakeirq = new_ep_wakeirq;
2857 PCIE_DBG_FS(&msm_pcie_dev[0],
2858 "PCIe: RC0: ep_wakeirq is now %d\n",
2859 msm_pcie_dev[0].ep_wakeirq);
2860 break;
2861 } else if (rc_sel & (1 << i)) {
2862 msm_pcie_dev[i].ep_wakeirq = new_ep_wakeirq;
2863 PCIE_DBG_FS(&msm_pcie_dev[i],
2864 "PCIe: RC%d: ep_wakeirq is now %d\n",
2865 i, msm_pcie_dev[i].ep_wakeirq);
2866 }
2867 }
2868 } else {
2869 pr_err("PCIe: Invalid input for ep_wakeirq: %d. Please enter 0 or 1.\n",
2870 new_ep_wakeirq);
2871 }
2872
2873 return count;
2874}
2875
2876const struct file_operations msm_pcie_ep_wakeirq_ops = {
2877 .write = msm_pcie_set_ep_wakeirq,
2878};
2879
2880static ssize_t msm_pcie_set_aer_enable(struct file *file,
2881 const char __user *buf,
2882 size_t count, loff_t *ppos)
2883{
2884 unsigned long ret;
2885 char str[MAX_MSG_LEN];
2886 u32 new_aer_enable = 0;
2887 u32 temp_rc_sel;
2888 int i;
2889
2890 memset(str, 0, sizeof(str));
2891 ret = copy_from_user(str, buf, sizeof(str));
2892 if (ret)
2893 return -EFAULT;
2894
2895 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2896 new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
2897
2898 if (new_aer_enable > 1) {
2899 pr_err(
2900 "PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
2901 new_aer_enable);
2902 return count;
2903 }
2904
2905 if (rc_sel)
2906 temp_rc_sel = rc_sel;
2907 else
2908 temp_rc_sel = 0x1;
2909
2910 for (i = 0; i < MAX_RC_NUM; i++) {
2911 if (temp_rc_sel & (1 << i)) {
2912 msm_pcie_dev[i].aer_enable = new_aer_enable;
2913 PCIE_DBG_FS(&msm_pcie_dev[i],
2914 "PCIe: RC%d: aer_enable is now %d\n",
2915 i, msm_pcie_dev[i].aer_enable);
2916
2917 msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
2918 PCIE20_BRIDGE_CTRL,
2919 new_aer_enable ? 0 : BIT(16),
2920 new_aer_enable ? BIT(16) : 0);
2921
2922 PCIE_DBG_FS(&msm_pcie_dev[i],
2923 "RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
2924 readl_relaxed(msm_pcie_dev[i].dm_core +
2925 PCIE20_BRIDGE_CTRL));
2926 }
2927 }
2928
2929 return count;
2930}
2931
2932const struct file_operations msm_pcie_aer_enable_ops = {
2933 .write = msm_pcie_set_aer_enable,
2934};
2935
2936static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
2937 const char __user *buf,
2938 size_t count, loff_t *ppos)
2939{
2940 unsigned long ret;
2941 char str[MAX_MSG_LEN];
2942 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002943 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002944
Tony Truongfdbd5672017-01-06 16:23:14 -08002945 memset(str, 0, size);
2946 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002947 if (ret)
2948 return -EFAULT;
2949
2950 corr_counter_limit = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002951 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002952 corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
2953
2954 pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
2955
2956 return count;
2957}
2958
/* debugfs "corr_counter_limit" file: write-only, handled by msm_pcie_set_corr_counter_limit() */
const struct file_operations msm_pcie_corr_counter_limit_ops = {
	.write = msm_pcie_set_corr_counter_limit,
};
2962
2963static void msm_pcie_debugfs_init(void)
2964{
2965 rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
2966 wr_mask = 0xffffffff;
2967
2968 dent_msm_pcie = debugfs_create_dir("pci-msm", 0);
2969 if (IS_ERR(dent_msm_pcie)) {
2970 pr_err("PCIe: fail to create the folder for debug_fs.\n");
2971 return;
2972 }
2973
2974 dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
2975 dent_msm_pcie, 0,
2976 &msm_pcie_rc_sel_ops);
2977 if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
2978 pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
2979 goto rc_sel_error;
2980 }
2981
2982 dfile_case = debugfs_create_file("case", 0664,
2983 dent_msm_pcie, 0,
2984 &msm_pcie_cmd_debug_ops);
2985 if (!dfile_case || IS_ERR(dfile_case)) {
2986 pr_err("PCIe: fail to create the file for debug_fs case.\n");
2987 goto case_error;
2988 }
2989
2990 dfile_base_sel = debugfs_create_file("base_sel", 0664,
2991 dent_msm_pcie, 0,
2992 &msm_pcie_base_sel_ops);
2993 if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
2994 pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
2995 goto base_sel_error;
2996 }
2997
2998 dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
2999 dent_msm_pcie, 0,
3000 &msm_pcie_linkdown_panic_ops);
3001 if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
3002 pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
3003 goto linkdown_panic_error;
3004 }
3005
3006 dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
3007 dent_msm_pcie, 0,
3008 &msm_pcie_wr_offset_ops);
3009 if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
3010 pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
3011 goto wr_offset_error;
3012 }
3013
3014 dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
3015 dent_msm_pcie, 0,
3016 &msm_pcie_wr_mask_ops);
3017 if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
3018 pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
3019 goto wr_mask_error;
3020 }
3021
3022 dfile_wr_value = debugfs_create_file("wr_value", 0664,
3023 dent_msm_pcie, 0,
3024 &msm_pcie_wr_value_ops);
3025 if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
3026 pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
3027 goto wr_value_error;
3028 }
3029
3030 dfile_ep_wakeirq = debugfs_create_file("ep_wakeirq", 0664,
3031 dent_msm_pcie, 0,
3032 &msm_pcie_ep_wakeirq_ops);
3033 if (!dfile_ep_wakeirq || IS_ERR(dfile_ep_wakeirq)) {
3034 pr_err("PCIe: fail to create the file for debug_fs ep_wakeirq.\n");
3035 goto ep_wakeirq_error;
3036 }
3037
3038 dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
3039 dent_msm_pcie, 0,
3040 &msm_pcie_aer_enable_ops);
3041 if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
3042 pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
3043 goto aer_enable_error;
3044 }
3045
3046 dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
3047 0664, dent_msm_pcie, 0,
3048 &msm_pcie_corr_counter_limit_ops);
3049 if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
3050 pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
3051 goto corr_counter_limit_error;
3052 }
3053 return;
3054
3055corr_counter_limit_error:
3056 debugfs_remove(dfile_aer_enable);
3057aer_enable_error:
3058 debugfs_remove(dfile_ep_wakeirq);
3059ep_wakeirq_error:
3060 debugfs_remove(dfile_wr_value);
3061wr_value_error:
3062 debugfs_remove(dfile_wr_mask);
3063wr_mask_error:
3064 debugfs_remove(dfile_wr_offset);
3065wr_offset_error:
3066 debugfs_remove(dfile_linkdown_panic);
3067linkdown_panic_error:
3068 debugfs_remove(dfile_base_sel);
3069base_sel_error:
3070 debugfs_remove(dfile_case);
3071case_error:
3072 debugfs_remove(dfile_rc_sel);
3073rc_sel_error:
3074 debugfs_remove(dent_msm_pcie);
3075}
3076
3077static void msm_pcie_debugfs_exit(void)
3078{
3079 debugfs_remove(dfile_rc_sel);
3080 debugfs_remove(dfile_case);
3081 debugfs_remove(dfile_base_sel);
3082 debugfs_remove(dfile_linkdown_panic);
3083 debugfs_remove(dfile_wr_offset);
3084 debugfs_remove(dfile_wr_mask);
3085 debugfs_remove(dfile_wr_value);
3086 debugfs_remove(dfile_ep_wakeirq);
3087 debugfs_remove(dfile_aer_enable);
3088 debugfs_remove(dfile_corr_counter_limit);
3089}
3090#else
/* CONFIG_DEBUG_FS disabled: provide empty stubs so callers need no #ifdefs */
static void msm_pcie_debugfs_init(void)
{
}

static void msm_pcie_debugfs_exit(void)
{
}
3098#endif
3099
3100static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
3101{
3102 return readl_relaxed(dev->dm_core +
3103 PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
3104}
3105
3106/**
3107 * msm_pcie_iatu_config - configure outbound address translation region
 * @dev: root complex
3109 * @nr: region number
3110 * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
3111 * @host_addr: - region start address on host
3112 * @host_end: - region end address (low 32 bit) on host,
3113 * upper 32 bits are same as for @host_addr
3114 * @target_addr: - region start address on target
3115 */
static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
				unsigned long host_addr, u32 host_end,
				unsigned long target_addr)
{
	void __iomem *pcie20 = dev->dm_core;

	/* mirror the programmed values so they can be replayed on recovery */
	if (dev->shadow_en) {
		dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
			nr;
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
			type;
		dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
			lower_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
			upper_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
			host_end;
		dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
			lower_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
			upper_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
			BIT(31);
	}

	/* select region */
	writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
	/* ensure that hardware locks it */
	wmb();

	/* switch off region before changing it */
	writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
	/* and wait till it propagates to the hardware */
	wmb();

	/* program type, host window and target address for the region */
	writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
	writel_relaxed(lower_32_bits(host_addr),
		       pcie20 + PCIE20_PLR_IATU_LBAR);
	writel_relaxed(upper_32_bits(host_addr),
		       pcie20 + PCIE20_PLR_IATU_UBAR);
	writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
	writel_relaxed(lower_32_bits(target_addr),
		       pcie20 + PCIE20_PLR_IATU_LTAR);
	writel_relaxed(upper_32_bits(target_addr),
		       pcie20 + PCIE20_PLR_IATU_UTAR);
	/* ensure that changes propagated to the hardware */
	wmb();
	/* BIT(31): region enable */
	writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);

	/* ensure that changes propagated to the hardware */
	wmb();

	/* after enumeration, region nr maps 1:1 to pcidev_table[nr] */
	if (dev->enumerated) {
		PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
			dev->pcidev_table[nr].bdf >> 24,
			dev->pcidev_table[nr].bdf >> 19 & 0x1f,
			dev->pcidev_table[nr].bdf >> 16 & 0x07);
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
	}
}
3191
3192/**
3193 * msm_pcie_cfg_bdf - configure for config access
 * @dev: root complex
3195 * @bus: PCI bus number
3196 * @devfn: PCI dev and function number
3197 *
3198 * Remap if required region 0 for config access of proper type
3199 * (CFG0 for bus 1, CFG1 for other buses)
3200 * Cache current device bdf for speed-up
3201 */
3202static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
3203{
3204 struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
3205 u32 bdf = BDF_OFFSET(bus, devfn);
3206 u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3207
3208 if (dev->current_bdf == bdf)
3209 return;
3210
3211 msm_pcie_iatu_config(dev, 0, type,
3212 axi_conf->start,
3213 axi_conf->start + SZ_4K - 1,
3214 bdf);
3215
3216 dev->current_bdf = bdf;
3217}
3218
/*
 * Cache a config-space write so it can be replayed after link recovery.
 * RC writes go into rc_shadow; endpoint writes go into the ep_shadow
 * slot matching @bdf, claiming a free slot (in both the per-RC table
 * and the global msm_pcie_dev_tbl) the first time a BDF is seen.
 */
static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
					u32 word_offset, u32 wr_val,
					u32 bdf, bool rc)
{
	int i, j;
	u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;

	if (rc) {
		dev->rc_shadow[word_offset / 4] = wr_val;
	} else {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			/* first empty slot: register this new BDF */
			if (!dev->pcidev_table[i].bdf) {
				for (j = 0; j < max_dev; j++)
					if (!msm_pcie_dev_tbl[j].bdf) {
						msm_pcie_dev_tbl[j].bdf = bdf;
						break;
					}
				dev->pcidev_table[i].bdf = bdf;
				/* a second device implies a bridge below the RC */
				if ((!dev->bridge_found) && (i > 0))
					dev->bridge_found = true;
			}
			if (dev->pcidev_table[i].bdf == bdf) {
				dev->ep_shadow[i][word_offset / 4] = wr_val;
				break;
			}
		}
	}
}
3247
/*
 * msm_pcie_oper_conf - common RC/EP config-space accessor.
 * @bus: PCI bus being accessed (bus 0 is the root complex itself)
 * @devfn: device/function on @bus
 * @oper: RD or WR
 * @where: byte offset into config space
 * @size: access width in bytes
 * @val: read result, or value to write
 *
 * Performs the access as a read-modify-write on the containing 32-bit
 * word, under cfg_lock.  Returns 0 on success or
 * PCIBIOS_DEVICE_NOT_FOUND; failed reads report ~0 through @val.
 */
static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
				     int where, int size, u32 *val)
{
	uint32_t word_offset, byte_offset, mask;
	uint32_t rd_val, wr_val;
	struct msm_pcie_dev_t *dev;
	void __iomem *config_base;
	bool rc = false;
	u32 rc_idx;
	int rv = 0;
	u32 bdf = BDF_OFFSET(bus->number, devfn);
	int i;

	dev = PCIE_BUS_PRIV_DATA(bus);

	if (!dev) {
		pr_err("PCIe: No device found for this bus.\n");
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto out;
	}

	rc_idx = dev->rc_idx;
	rc = (bus->number == 0);

	spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);

	/* config access may be suspended (e.g. during recovery) */
	if (!dev->cfg_access) {
		PCIE_DBG3(dev,
			"Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* the RC exposes only function 0 */
	if (rc && (devfn != 0)) {
		PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
			 (oper == RD) ? "rd" : "wr", bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG3(dev,
			"Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* check if the link is up for endpoint */
	if (!rc && !msm_pcie_is_link_up(dev)) {
		PCIE_ERR(dev,
			"PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
				rc_idx, (oper == RD) ? "rd" : "wr",
				bus->number, devfn);
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
	}

	/* before enumeration, retarget IATU region 0 at this BDF */
	if (!rc && !dev->enumerated)
		msm_pcie_cfg_bdf(dev, bus->number, devfn);

	/* split the access into aligned word + byte lane mask */
	word_offset = where & ~0x3;
	byte_offset = where & 0x3;
	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);

	/* pick the mapped config window for this device */
	if (rc || !dev->enumerated) {
		config_base = rc ? dev->dm_core : dev->conf;
	} else {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (dev->pcidev_table[i].bdf == bdf) {
				config_base = dev->pcidev_table[i].conf_base;
				break;
			}
		}
		if (i == MAX_DEVICE_NUM) {
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
		}
	}

	rd_val = readl_relaxed(config_base + word_offset);

	if (oper == RD) {
		*val = ((rd_val & mask) >> (8 * byte_offset));
		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
			rc_idx, bus->number, devfn, where, size, *val, rd_val);
	} else {
		/* merge the new bytes into the word read above */
		wr_val = (rd_val & ~mask) |
				((*val << (8 * byte_offset)) & mask);

		/* RC dword 0x3c: force bits 16-17 on every write */
		if ((bus->number == 0) && (where == 0x3c))
			wr_val = wr_val | (3 << 16);

		writel_relaxed(wr_val, config_base + word_offset);
		wmb(); /* ensure config data is written to hardware register */

		if (dev->shadow_en) {
			/* all-FFs readback suggests link down: don't shadow it */
			if (rd_val == PCIE_LINK_DOWN &&
				(readl_relaxed(config_base) == PCIE_LINK_DOWN))
				PCIE_ERR(dev,
					"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
					rc_idx, bus->number, devfn,
					where, size);
			else
				msm_pcie_save_shadow(dev, word_offset, wr_val,
					bdf, rc);
		}

		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
			rc_idx, bus->number, devfn, where, size,
			wr_val, rd_val, *val);
	}

unlock:
	spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
out:
	return rv;
}
3375
3376static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
3377 int size, u32 *val)
3378{
3379 int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
3380
3381 if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
3382 *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
3383 PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
3384 }
3385
3386 return ret;
3387}
3388
/* pci_ops write hook: delegates to msm_pcie_oper_conf() in WR mode. */
static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			    int where, int size, u32 val)
{
	return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
}
3394
/* config accessors handed to the PCI core for this host bridge */
static struct pci_ops msm_pcie_ops = {
	.read = msm_pcie_rd_conf,
	.write = msm_pcie_wr_conf,
};
3399
3400static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
3401{
3402 int rc = 0, i;
3403 struct msm_pcie_gpio_info_t *info;
3404
3405 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3406
3407 for (i = 0; i < dev->gpio_n; i++) {
3408 info = &dev->gpio[i];
3409
3410 if (!info->num)
3411 continue;
3412
3413 rc = gpio_request(info->num, info->name);
3414 if (rc) {
3415 PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
3416 dev->rc_idx, info->name, rc);
3417 break;
3418 }
3419
3420 if (info->out)
3421 rc = gpio_direction_output(info->num, info->init);
3422 else
3423 rc = gpio_direction_input(info->num);
3424 if (rc) {
3425 PCIE_ERR(dev,
3426 "PCIe: RC%d can't set direction for GPIO %s:%d\n",
3427 dev->rc_idx, info->name, rc);
3428 gpio_free(info->num);
3429 break;
3430 }
3431 }
3432
3433 if (rc)
3434 while (i--)
3435 gpio_free(dev->gpio[i].num);
3436
3437 return rc;
3438}
3439
3440static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
3441{
3442 int i;
3443
3444 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3445
3446 for (i = 0; i < dev->gpio_n; i++)
3447 gpio_free(dev->gpio[i].num);
3448}
3449
/*
 * Enable every regulator in dev->vreg[] in table order: set voltage
 * window and load (where configured), then enable.  On failure,
 * previously enabled regulators are disabled in reverse order and the
 * "vreg-cx" corner vote is dropped; returns 0 or the first error.
 */
int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct regulator *vreg;
	struct msm_pcie_vreg_info_t *info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		info = &dev->vreg[i];
		vreg = info->hdl;

		/* optional supply not present on this target */
		if (!vreg)
			continue;

		PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
			dev->rc_idx, info->name);
		if (info->max_v) {
			rc = regulator_set_voltage(vreg,
						info->min_v, info->max_v);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set voltage for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		if (info->opt_mode) {
			rc = regulator_set_load(vreg, info->opt_mode);
			if (rc < 0) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set mode for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		rc = regulator_enable(vreg);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe: RC%d can't enable regulator %s: %d\n",
				dev->rc_idx, info->name, rc);
			break;
		}
	}

	/* unwind previously enabled supplies on failure */
	if (rc)
		while (i--) {
			struct regulator *hdl = dev->vreg[i].hdl;

			if (hdl) {
				regulator_disable(hdl);
				if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
					PCIE_DBG(dev,
						"RC%d: Removing %s vote.\n",
						dev->rc_idx,
						dev->vreg[i].name);
					regulator_set_voltage(hdl,
						RPM_REGULATOR_CORNER_NONE,
						INT_MAX);
				}
			}

		}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
3520
3521static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
3522{
3523 int i;
3524
3525 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3526
3527 for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
3528 if (dev->vreg[i].hdl) {
3529 PCIE_DBG(dev, "Vreg %s is being disabled\n",
3530 dev->vreg[i].name);
3531 regulator_disable(dev->vreg[i].hdl);
3532
3533 if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
3534 PCIE_DBG(dev,
3535 "RC%d: Removing %s vote.\n",
3536 dev->rc_idx,
3537 dev->vreg[i].name);
3538 regulator_set_voltage(dev->vreg[i].hdl,
3539 RPM_REGULATOR_CORNER_NONE,
3540 INT_MAX);
3541 }
3542 }
3543 }
3544
3545 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3546}
3547
3548static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
3549{
3550 int i, rc = 0;
3551 struct msm_pcie_clk_info_t *info;
3552 struct msm_pcie_reset_info_t *reset_info;
3553
3554 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3555
3556 rc = regulator_enable(dev->gdsc);
3557
3558 if (rc) {
3559 PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
3560 dev->rc_idx, dev->pdev->name);
3561 return rc;
3562 }
3563
3564 if (dev->gdsc_smmu) {
3565 rc = regulator_enable(dev->gdsc_smmu);
3566
3567 if (rc) {
3568 PCIE_ERR(dev,
3569 "PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
3570 dev->rc_idx, dev->pdev->name);
3571 return rc;
3572 }
3573 }
3574
3575 PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
3576 if (dev->bus_client) {
3577 rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
3578 if (rc) {
3579 PCIE_ERR(dev,
3580 "PCIe: fail to set bus bandwidth for RC%d:%d.\n",
3581 dev->rc_idx, rc);
3582 return rc;
3583 }
3584
3585 PCIE_DBG2(dev,
3586 "PCIe: set bus bandwidth for RC%d.\n",
3587 dev->rc_idx);
3588 }
3589
3590 for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
3591 info = &dev->clk[i];
3592
3593 if (!info->hdl)
3594 continue;
3595
3596 if (info->config_mem)
3597 msm_pcie_config_clock_mem(dev, info);
3598
3599 if (info->freq) {
3600 rc = clk_set_rate(info->hdl, info->freq);
3601 if (rc) {
3602 PCIE_ERR(dev,
3603 "PCIe: RC%d can't set rate for clk %s: %d.\n",
3604 dev->rc_idx, info->name, rc);
3605 break;
3606 }
3607
3608 PCIE_DBG2(dev,
3609 "PCIe: RC%d set rate for clk %s.\n",
3610 dev->rc_idx, info->name);
3611 }
3612
3613 rc = clk_prepare_enable(info->hdl);
3614
3615 if (rc)
3616 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
3617 dev->rc_idx, info->name);
3618 else
3619 PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
3620 info->name, dev->rc_idx);
3621 }
3622
3623 if (rc) {
3624 PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
3625 dev->rc_idx);
3626 while (i--) {
3627 struct clk *hdl = dev->clk[i].hdl;
3628
3629 if (hdl)
3630 clk_disable_unprepare(hdl);
3631 }
3632
3633 if (dev->gdsc_smmu)
3634 regulator_disable(dev->gdsc_smmu);
3635
3636 regulator_disable(dev->gdsc);
3637 }
3638
3639 for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
3640 reset_info = &dev->reset[i];
3641 if (reset_info->hdl) {
3642 rc = reset_control_deassert(reset_info->hdl);
3643 if (rc)
3644 PCIE_ERR(dev,
3645 "PCIe: RC%d failed to deassert reset for %s.\n",
3646 dev->rc_idx, reset_info->name);
3647 else
3648 PCIE_DBG2(dev,
3649 "PCIe: RC%d successfully deasserted reset for %s.\n",
3650 dev->rc_idx, reset_info->name);
3651 }
3652 }
3653
3654 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3655
3656 return rc;
3657}
3658
3659static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
3660{
3661 int i;
3662 int rc;
3663
3664 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3665
3666 for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
3667 if (dev->clk[i].hdl)
3668 clk_disable_unprepare(dev->clk[i].hdl);
3669
3670 if (dev->bus_client) {
3671 PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
3672 dev->rc_idx);
3673
3674 rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
3675 if (rc)
3676 PCIE_ERR(dev,
3677 "PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
3678 dev->rc_idx, rc);
3679 else
3680 PCIE_DBG(dev,
3681 "PCIe: relinquish bus bandwidth for RC%d.\n",
3682 dev->rc_idx);
3683 }
3684
3685 if (dev->gdsc_smmu)
3686 regulator_disable(dev->gdsc_smmu);
3687
3688 regulator_disable(dev->gdsc);
3689
3690 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3691}
3692
3693static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
3694{
3695 int i, rc = 0;
3696 struct msm_pcie_clk_info_t *info;
3697 struct msm_pcie_reset_info_t *pipe_reset_info;
3698
3699 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3700
3701 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
3702 info = &dev->pipeclk[i];
3703
3704 if (!info->hdl)
3705 continue;
3706
3707
3708 if (info->config_mem)
3709 msm_pcie_config_clock_mem(dev, info);
3710
3711 if (info->freq) {
3712 rc = clk_set_rate(info->hdl, info->freq);
3713 if (rc) {
3714 PCIE_ERR(dev,
3715 "PCIe: RC%d can't set rate for clk %s: %d.\n",
3716 dev->rc_idx, info->name, rc);
3717 break;
3718 }
3719
3720 PCIE_DBG2(dev,
3721 "PCIe: RC%d set rate for clk %s: %d.\n",
3722 dev->rc_idx, info->name, rc);
3723 }
3724
3725 rc = clk_prepare_enable(info->hdl);
3726
3727 if (rc)
3728 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
3729 dev->rc_idx, info->name);
3730 else
3731 PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
3732 dev->rc_idx, info->name);
3733 }
3734
3735 if (rc) {
3736 PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
3737 dev->rc_idx);
3738 while (i--)
3739 if (dev->pipeclk[i].hdl)
3740 clk_disable_unprepare(dev->pipeclk[i].hdl);
3741 }
3742
3743 for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
3744 pipe_reset_info = &dev->pipe_reset[i];
3745 if (pipe_reset_info->hdl) {
3746 rc = reset_control_deassert(
3747 pipe_reset_info->hdl);
3748 if (rc)
3749 PCIE_ERR(dev,
3750 "PCIe: RC%d failed to deassert pipe reset for %s.\n",
3751 dev->rc_idx, pipe_reset_info->name);
3752 else
3753 PCIE_DBG2(dev,
3754 "PCIe: RC%d successfully deasserted pipe reset for %s.\n",
3755 dev->rc_idx, pipe_reset_info->name);
3756 }
3757 }
3758
3759 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3760
3761 return rc;
3762}
3763
3764static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
3765{
3766 int i;
3767
3768 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3769
3770 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
3771 if (dev->pipeclk[i].hdl)
3772 clk_disable_unprepare(
3773 dev->pipeclk[i].hdl);
3774
3775 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3776}
3777
3778static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
3779{
3780 int i;
3781 u8 type;
3782 struct msm_pcie_device_info *dev_table = dev->pcidev_table;
3783
3784 for (i = 0; i < MAX_DEVICE_NUM; i++) {
3785 if (!dev_table[i].bdf)
3786 break;
3787
3788 type = dev_table[i].bdf >> 24 == 0x1 ?
3789 PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3790
3791 msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
3792 dev_table[i].phy_address + SZ_4K - 1,
3793 dev_table[i].bdf);
3794 }
3795}
3796
/*
 * Program the RC core after link training: IATU regions, N_FTS, AUX
 * clock frequency, completion timeout, AER enables and (when an SMMU
 * is present) the PARF BDF-translation registers.
 */
static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
{
	int i;

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
	 * axi config address range to device config address range
	 */
	if (dev->enumerated) {
		msm_pcie_iatu_config_all_ep(dev);
	} else {
		dev->current_bdf = 0; /* to force IATU re-config */
		msm_pcie_cfg_bdf(dev, 1, 0);
	}

	/* configure N_FTS */
	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
	if (!dev->n_fts)
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
					0, BIT(15));
	else
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
					PCIE20_ACK_N_FTS,
					dev->n_fts << 8);

	/* keep the restore shadow in sync with the register */
	if (dev->shadow_en)
		dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
			readl_relaxed(dev->dm_core +
				PCIE20_ACK_F_ASPM_CTRL_REG);

	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));

	/* configure AUX clock frequency register for PCIe core */
	if (dev->use_19p2mhz_aux_clk)
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
	else
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);

	/* configure the completion timeout value for PCIe core */
	if (dev->cpl_timeout && dev->bridge_found)
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0xf, dev->cpl_timeout);

	/* Enable AER on RC */
	if (dev->aer_enable) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
					BIT(16)|BIT(17));
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
					BIT(3)|BIT(2)|BIT(1)|BIT(0));

		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
	}

	/* configure SMMU registers */
	if (dev->smmu_exist) {
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_SID_OFFSET, 0);

		/* re-install the per-device BDF translation entries */
		if (dev->enumerated) {
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				if (dev->pcidev_table[i].dev &&
					dev->pcidev_table[i].short_bdf) {
					msm_pcie_write_reg(dev->parf,
						PCIE20_PARF_BDF_TRANSLATE_N +
						dev->pcidev_table[i].short_bdf
						* 4,
						dev->pcidev_table[i].bdf >> 16);
				}
			}
		}
	}
}
3878
/*
 * Enable the configured link power-management features (common clock,
 * clock power management, L0s, L1, L1 substates) on both the RC and
 * the endpoint.  The endpoint's PCIe and L1SS capability offsets are
 * discovered by walking its capability lists; enabled settings are
 * mirrored into the restore shadow when shadow_en is set.  Bails out
 * early if a required capability is absent or misaligned.
 */
static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
{
	u32 val;
	u32 current_offset;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_cap_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	/* Enable the AUX Clock and the Core Clk to be synchronous for L1SS*/
	if (!dev->aux_clk_sync && dev->l1ss_supported)
		msm_pcie_write_mask(dev->parf +
				PCIE20_PARF_SYS_CTRL, BIT(3), 0);

	/* walk the EP's capability list for the PCIe capability */
	current_offset = readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

	while (current_offset) {
		if (msm_pcie_check_align(dev, current_offset))
			return;

		val = readl_relaxed(dev->conf + current_offset);
		if ((val & 0xff) == PCIE20_CAP_ID) {
			/* fixed offsets within the PCIe capability */
			ep_link_cap_offset = current_offset + 0x0c;
			ep_link_ctrlstts_offset = current_offset + 0x10;
			ep_dev_ctrl2stts2_offset = current_offset + 0x28;
			break;
		}
		current_offset = (val >> 8) & 0xff;
	}

	if (!ep_link_cap_offset) {
		PCIE_DBG(dev,
			"RC%d endpoint does not support PCIe capability registers\n",
			dev->rc_idx);
		return;
	}

	PCIE_DBG(dev,
		"RC%d: ep_link_cap_offset: 0x%x\n",
		dev->rc_idx, ep_link_cap_offset);

	/* BIT(6): common clock configuration on both link partners */
	if (dev->common_clk_en) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(6));

		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(6));

		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);

			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}

		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* BIT(8): clock PM, only if the EP advertises it (link cap BIT(18)) */
	if (dev->clk_power_manage_en) {
		val = readl_relaxed(dev->conf + ep_link_cap_offset);
		if (val & BIT(18)) {
			msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
						0, BIT(8));

			if (dev->shadow_en)
				dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
					readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);

			PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset));
		}
	}

	/* BIT(0): L0s entry enable on both sides */
	if (dev->l0s_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(0));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
						readl_relaxed(dev->dm_core +
						PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
						readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* BIT(1): L1 entry enable on both sides */
	if (dev->l1_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(1));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
						readl_relaxed(dev->dm_core +
						PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
						readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	if (dev->l1ss_supported) {
		/* walk the EP's extended capability list for L1SS */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			if (msm_pcie_check_align(dev, current_offset))
				return;

			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset = current_offset + 0x4;
				ep_l1sub_ctrl1_offset = current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG(dev,
				"RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			return;
		}

		val = readl_relaxed(dev->conf + ep_l1sub_cap_reg1_offset);

		PCIE_DBG2(dev, "EP's L1SUB_CAPABILITY_REG_1: 0x%x\n", val);
		PCIE_DBG2(dev, "RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
				dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* enable only the substates the EP advertises (low 4 bits) */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core, PCIE20_L1SUB_CONTROL1,
					0xf, val);
		/* BIT(10): LTR mechanism enable */
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0, BIT(10));
		msm_pcie_write_reg_field(dev->conf, ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf + ep_dev_ctrl2stts2_offset,
					0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
					readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
					readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
					readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
					readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG2(dev, "RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1));
		PCIE_DBG2(dev, "RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG2(dev, "EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf + ep_l1sub_ctrl1_offset));
		PCIE_DBG2(dev, "EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
			ep_dev_ctrl2stts2_offset));
	}
}
4066
4067void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
4068{
4069 int i;
4070
4071 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4072
4073 /* program MSI controller and enable all interrupts */
4074 writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
4075 writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
4076
4077 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
4078 writel_relaxed(~0, dev->dm_core +
4079 PCIE20_MSI_CTRL_INTR_EN + (i * 12));
4080
4081 /* ensure that hardware is configured before proceeding */
4082 wmb();
4083}
4084
/*
 * msm_pcie_get_resources - acquire every resource this RC needs from DT
 * and the platform device: regulators, GDSCs, GPIOs, PHY init sequences,
 * clocks (core + pipe), resets, bus-scale votes, memory-mapped register
 * regions, and IRQ/MSI numbers.
 *
 * @dev:  per-RC driver state; its vreg/gpio/clk/reset/res/irq/msi tables
 *        (pre-populated with names and 'required' flags) are filled in here.
 * @pdev: the platform device whose of_node and resources are parsed.
 *
 * Returns 0 on success or a negative errno; on any failure the managed
 * (devm_*) resources are released by the driver core. The temporary
 * clkfreq array is always freed at 'out'.
 */
static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
					struct platform_device *pdev)
{
	int i, len, cnt, ret = 0, size = 0;
	struct msm_pcie_vreg_info_t *vreg_info;
	struct msm_pcie_gpio_info_t *gpio_info;
	struct msm_pcie_clk_info_t *clk_info;
	struct resource *res;
	struct msm_pcie_res_info_t *res_info;
	struct msm_pcie_irq_info_t *irq_info;
	struct msm_pcie_irq_info_t *msi_info;
	struct msm_pcie_reset_info_t *reset_info;
	struct msm_pcie_reset_info_t *pipe_reset_info;
	char prop_name[MAX_PROP_SIZE];
	const __be32 *prop;
	u32 *clkfreq = NULL;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	/*
	 * Read per-clock maximum frequencies, indexed to match the DT
	 * "clock-names" list (pipe clocks first, then core clocks).
	 * NOTE(review): clkfreq is sized MSM_PCIE_MAX_CLK +
	 * MSM_PCIE_MAX_PIPE_CLK but the read uses the DT string count
	 * 'cnt' — a DT with more clock-names entries than that sum would
	 * overrun the buffer; confirm DTs are bounded.
	 */
	cnt = of_property_count_strings((&pdev->dev)->of_node,
			"clock-names");
	if (cnt > 0) {
		clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
				sizeof(*clkfreq), GFP_KERNEL);
		if (!clkfreq) {
			PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
					dev->rc_idx);
			return -ENOMEM;
		}
		ret = of_property_read_u32_array(
			(&pdev->dev)->of_node,
			"max-clock-frequency-hz", clkfreq, cnt);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
				dev->rc_idx, ret);
			goto out;
		}
	}

	/*
	 * Regulators: -EPROBE_DEFER always propagates; otherwise a missing
	 * regulator only fails the probe when marked 'required'. For each
	 * regulator found, optionally read its qcom,<name>-voltage-level
	 * triple: <max_v min_v opt_mode>.
	 */
	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		vreg_info = &dev->vreg[i];
		vreg_info->hdl =
				devm_regulator_get(&pdev->dev, vreg_info->name);

		if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
			PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
				vreg_info->name);
			ret = PTR_ERR(vreg_info->hdl);
			goto out;
		}

		if (IS_ERR(vreg_info->hdl)) {
			if (vreg_info->required) {
				PCIE_DBG(dev, "Vreg %s doesn't exist\n",
					vreg_info->name);
				ret = PTR_ERR(vreg_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Optional Vreg %s doesn't exist\n",
					vreg_info->name);
				vreg_info->hdl = NULL;
			}
		} else {
			dev->vreg_n++;
			snprintf(prop_name, MAX_PROP_SIZE,
				"qcom,%s-voltage-level", vreg_info->name);
			prop = of_get_property((&pdev->dev)->of_node,
						prop_name, &len);
			if (!prop || (len != (3 * sizeof(__be32)))) {
				PCIE_DBG(dev, "%s %s property\n",
					prop ? "invalid format" :
					"no", prop_name);
			} else {
				vreg_info->max_v = be32_to_cpup(&prop[0]);
				vreg_info->min_v = be32_to_cpup(&prop[1]);
				vreg_info->opt_mode =
					be32_to_cpup(&prop[2]);
			}
		}
	}

	/* Core power-domain GDSC is mandatory. */
	dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");

	if (IS_ERR(dev->gdsc)) {
		PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
			dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
		if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
			PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
					dev->pdev->name);
		ret = PTR_ERR(dev->gdsc);
		goto out;
	}

	/* SMMU GDSC is optional. */
	dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");

	if (IS_ERR(dev->gdsc_smmu)) {
		PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
			dev->rc_idx);
		dev->gdsc_smmu = NULL;
	}

	/*
	 * GPIOs (PERST, WAKE, EP, ...): on a required-GPIO lookup failure,
	 * 'ret' still holds the negative errno from of_get_named_gpio()
	 * when we jump to 'out'; it is reset to 0 at each loop bottom.
	 */
	dev->gpio_n = 0;
	for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
		gpio_info = &dev->gpio[i];
		ret = of_get_named_gpio((&pdev->dev)->of_node,
					gpio_info->name, 0);
		if (ret >= 0) {
			gpio_info->num = ret;
			dev->gpio_n++;
			PCIE_DBG(dev, "GPIO num for %s is %d\n",
				gpio_info->name, gpio_info->num);
		} else {
			if (gpio_info->required) {
				PCIE_ERR(dev,
					"Could not get required GPIO %s\n",
					gpio_info->name);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Could not get optional GPIO %s\n",
					gpio_info->name);
			}
		}
		ret = 0;
	}

	/* Optional common-PHY register init sequence from DT. */
	of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
	if (size) {
		dev->phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->phy_sequence) {
			dev->phy_len =
				size / sizeof(*dev->phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,phy-sequence",
				(unsigned int *)dev->phy_sequence,
				size / sizeof(dev->phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* Optional per-port PHY (PCS) init sequence from DT. */
	of_get_property(pdev->dev.of_node, "qcom,port-phy-sequence", &size);
	if (size) {
		dev->port_phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->port_phy_sequence) {
			dev->port_phy_len =
				size / sizeof(*dev->port_phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,port-phy-sequence",
				(unsigned int *)dev->port_phy_sequence,
				size / sizeof(dev->port_phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for port phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: port phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* Core clocks; frequencies come after the pipe-clock entries. */
	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
		clk_info = &dev->clk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i +
					MSM_PCIE_MAX_PIPE_CLK];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* Pipe clocks; their frequencies are first in clkfreq[]. */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		clk_info = &dev->pipeclk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* Reset controllers; only 'required' ones can fail the probe. */
	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
		reset_info = &dev->reset[i];

		reset_info->hdl = devm_reset_control_get(&pdev->dev,
						reset_info->name);

		if (IS_ERR(reset_info->hdl)) {
			if (reset_info->required) {
				PCIE_DBG(dev,
					"Reset %s isn't available:%ld\n",
					reset_info->name,
					PTR_ERR(reset_info->hdl));

				ret = PTR_ERR(reset_info->hdl);
				reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Reset %s\n",
					reset_info->name);
				reset_info->hdl = NULL;
			}
		}
	}

	/* Pipe-clock reset controllers, same required/optional policy. */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];

		pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
						pipe_reset_info->name);

		if (IS_ERR(pipe_reset_info->hdl)) {
			if (pipe_reset_info->required) {
				PCIE_DBG(dev,
					"Pipe Reset %s isn't available:%ld\n",
					pipe_reset_info->name,
					PTR_ERR(pipe_reset_info->hdl));

				ret = PTR_ERR(pipe_reset_info->hdl);
				pipe_reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
					pipe_reset_info->name);
				pipe_reset_info->hdl = NULL;
			}
		}
	}

	/* Bus bandwidth voting client (optional if no table in DT). */
	dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (!dev->bus_scale_table) {
		PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
			dev->rc_idx, dev->pdev->name);
		dev->bus_client = 0;
	} else {
		dev->bus_client =
			msm_bus_scale_register_client(dev->bus_scale_table);
		if (!dev->bus_client) {
			PCIE_ERR(dev,
				"PCIe: Failed to register bus client for RC%d (%s)\n",
				dev->rc_idx, dev->pdev->name);
			msm_bus_cl_clear_pdata(dev->bus_scale_table);
			ret = -ENODEV;
			goto out;
		}
	}

	/* Map every named register region (parf, phy, elbi, dbi, ...). */
	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
		res_info = &dev->res[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							res_info->name);

		if (!res) {
			PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
				dev->rc_idx, res_info->name);
		} else {
			PCIE_DBG(dev, "start addr for %s is %pa.\n",
				res_info->name, &res->start);

			res_info->base = devm_ioremap(&pdev->dev,
						res->start, resource_size(res));
			if (!res_info->base) {
				PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
					dev->rc_idx, res_info->name);
				ret = -ENOMEM;
				goto out;
			} else {
				res_info->resource = res;
			}
		}
	}

	/* Record the platform IRQ numbers by name (missing ones are skipped). */
	for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
		irq_info = &dev->irq[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							irq_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, irq_info->name);
		} else {
			irq_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
					irq_info->num);
		}
	}

	/* Same lookup for the MSI interrupt lines. */
	for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
		msi_info = &dev->msi[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							msi_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, msi_info->name);
		} else {
			msi_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
					msi_info->num);
		}
	}

	/* All allocations succeeded */

	if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
		dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
	else
		dev->wake_n = 0;

	/* Cache the ioremapped bases under their conventional names. */
	dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
	dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
	dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
	dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
	dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
	dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
	dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
	dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
	dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
	dev->dev_io_res->flags = IORESOURCE_IO;

out:
	kfree(clkfreq);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
4463
4464static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
4465{
4466 dev->parf = NULL;
4467 dev->elbi = NULL;
4468 dev->dm_core = NULL;
4469 dev->conf = NULL;
4470 dev->bars = NULL;
4471 dev->tcsr = NULL;
4472 dev->dev_mem_res = NULL;
4473 dev->dev_io_res = NULL;
4474}
4475
/*
 * msm_pcie_enable - power up and train the PCIe link for one RC.
 *
 * Sequence: assert PERST, enable regulators/clocks per @options
 * (PM_VREG/PM_CLK/PM_PIPE_CLK bits), program PARF, init the (shared)
 * PHY, de-assert PERST, start LTSSM, and poll for link-up. On success
 * the MSI controller and link-state settings are configured and
 * dev->link_status becomes MSM_PCIE_LINK_ENABLED.
 *
 * Returns 0 on success, a negative errno for power/clock/PHY failures,
 * or -1 if link training times out. Serialized by dev->setup_lock;
 * failure paths unwind in reverse order via the goto labels.
 */
int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
{
	int ret = 0;
	uint32_t val;
	long int retries = 0;
	int link_check_count = 0;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	/* Nothing to do if the link is already up. */
	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
		PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
			dev->rc_idx);
		goto out;
	}

	/* assert PCIe reset link to keep EP in reset */

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
				 PERST_PROPAGATION_DELAY_US_MAX);

	/* enable power */

	if (options & PM_VREG) {
		ret = msm_pcie_vreg_init(dev);
		if (ret)
			goto out;
	}

	/* enable clocks */
	if (options & PM_CLK) {
		ret = msm_pcie_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto clk_fail;
	}

	/* Re-apply secure configuration via SCM if the DT provided an id. */
	if (dev->scm_dev_id) {
		PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
		msm_pcie_restore_sec_config(dev);
	}

	/* enable PCIe clocks and resets */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);

	/* change DBI base address */
	writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
				0, BIT(4));

	/* enable selected IRQ */
	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
		msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);

		msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
					BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
					BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
					BIT(MSM_PCIE_INT_EVT_AER_ERR) |
					BIT(MSM_PCIE_INT_EVT_MSI_0) |
					BIT(MSM_PCIE_INT_EVT_MSI_1) |
					BIT(MSM_PCIE_INT_EVT_MSI_2) |
					BIT(MSM_PCIE_INT_EVT_MSI_3) |
					BIT(MSM_PCIE_INT_EVT_MSI_4) |
					BIT(MSM_PCIE_INT_EVT_MSI_5) |
					BIT(MSM_PCIE_INT_EVT_MSI_6) |
					BIT(MSM_PCIE_INT_EVT_MSI_7));

		PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
	}

	/* Size the slave address space to cover the BAR memory window. */
	if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_16M)
		writel_relaxed(SZ_32M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_8M)
		writel_relaxed(SZ_16M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else
		writel_relaxed(SZ_8M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);

	if (dev->use_msi) {
		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
		/* Keep the existing halt size unless DT overrode it. */
		val = dev->wr_halt_size ? dev->wr_halt_size :
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
			BIT(31) | val);

		PCIE_DBG(dev,
			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
			dev->rc_idx,
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
	}

	/* The common PHY is initialized once, by the first RC to come up. */
	mutex_lock(&com_phy_lock);
	/* init PCIe PHY */
	if (!num_rc_on)
		pcie_phy_init(dev);

	num_rc_on++;
	mutex_unlock(&com_phy_lock);

	if (options & PM_PIPE_CLK) {
		usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
					 PHY_STABILIZATION_DELAY_US_MAX);
		/* Enable the pipe clock */
		ret = msm_pcie_pipe_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto link_fail;
	}

	PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);

	/* Poll the PHY ready status with bounded retries. */
	do {
		if (pcie_phy_is_ready(dev))
			break;
		retries++;
		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
					 REFCLK_STABILIZATION_DELAY_US_MAX);
	} while (retries < PHY_READY_TIMEOUT_COUNT);

	PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
		dev->rc_idx, retries);

	if (pcie_phy_is_ready(dev))
		PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
	else {
		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
			dev->rc_idx);
		ret = -ENODEV;
		pcie_phy_dump(dev);
		goto link_fail;
	}

	pcie_pcs_port_phy_init(dev);

	/* Optional extra settle time for slow endpoints (ms from DT). */
	if (dev->ep_latency)
		usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				dev->gpio[MSM_PCIE_GPIO_EP].on);

	/* de-assert PCIe reset link to bring EP out of reset */

	PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);

	/* set max tlp read size */
	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
				0x7000, dev->tlp_rd_size);

	/* enable link training */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));

	PCIE_DBG(dev, "%s", "check if link is up\n");

	/* Wait for up to 100ms for the link to come up */
	do {
		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
		val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
		PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
			dev->rc_idx, (val >> 12) & 0x3f);
	} while ((!(val & XMLH_LINK_UP) ||
		!msm_pcie_confirm_linkup(dev, false, false, NULL))
		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));

	if ((val & XMLH_LINK_UP) &&
		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
		PCIE_DBG(dev, "Link is up after %d checkings\n",
			link_check_count);
		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
	} else {
		/* Training failed: put the EP back in reset before unwind. */
		PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
			dev->rc_idx);
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
			dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
			dev->rc_idx);
		ret = -1;
		goto link_fail;
	}

	msm_pcie_config_controller(dev);

	/* GIC-based MSI takes precedence over the internal MSI controller. */
	if (!dev->msi_gicm_addr)
		msm_pcie_config_msi_controller(dev);

	msm_pcie_config_link_state(dev);

	dev->link_status = MSM_PCIE_LINK_ENABLED;
	dev->power_on = true;
	dev->suspending = false;
	dev->link_turned_on_counter++;

	goto out;

link_fail:
	/* Undo EP enable, reset the per-port PHY, then the common PHY if
	 * this was the last active RC, and stop pipe clocks and clocks.
	 */
	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	msm_pcie_pipe_clk_deinit(dev);
	msm_pcie_clk_deinit(dev);
clk_fail:
	msm_pcie_vreg_deinit(dev);
out:
	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
4722
/*
 * msm_pcie_disable - power down the PCIe link of one RC.
 *
 * Reverse of msm_pcie_enable(): assert PERST, reset/power down the
 * per-port PHY (and the common PHY when the last RC goes down), then
 * release clocks/regulators/pipe clocks according to the PM_* bits in
 * @options. No-op if the RC is already powered off. Serialized by
 * dev->setup_lock.
 */
void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
{
	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (!dev->power_on) {
		PCIE_DBG(dev,
			"PCIe: the link of RC%d is already power down.\n",
			dev->rc_idx);
		mutex_unlock(&dev->setup_lock);
		return;
	}

	/* Mark the link down before touching hardware. */
	dev->link_status = MSM_PCIE_LINK_DISABLED;
	dev->power_on = false;
	dev->link_turned_off_counter++;

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);

	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);

	/* Reset and power down this RC's port PHY. */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	/* Last RC down also powers off the shared common PHY. */
	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	if (options & PM_CLK) {
		/* Gate the PHY via PARF before stopping the clocks. */
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
					BIT(0));
		msm_pcie_clk_deinit(dev);
	}

	if (options & PM_VREG)
		msm_pcie_vreg_deinit(dev);

	if (options & PM_PIPE_CLK)
		msm_pcie_pipe_clk_deinit(dev);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
4782
4783static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
4784 struct msm_pcie_device_info *ep_dev_info)
4785{
4786 u32 val;
4787 void __iomem *ep_base = ep_dev_info->conf_base;
4788 u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
4789 0xff;
4790
4791 while (current_offset) {
4792 if (msm_pcie_check_align(dev, current_offset))
4793 return;
4794
4795 val = readl_relaxed(ep_base + current_offset);
4796 if ((val & 0xff) == PCIE20_CAP_ID) {
4797 ep_dev_info->dev_ctrlstts_offset =
4798 current_offset + 0x8;
4799 break;
4800 }
4801 current_offset = (val >> 8) & 0xff;
4802 }
4803
4804 if (!ep_dev_info->dev_ctrlstts_offset) {
4805 PCIE_DBG(dev,
4806 "RC%d endpoint does not support PCIe cap registers\n",
4807 dev->rc_idx);
4808 return;
4809 }
4810
4811 PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
4812 dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
4813
4814 /* Enable AER on EP */
4815 msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
4816 BIT(3)|BIT(2)|BIT(1)|BIT(0));
4817
4818 PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
4819 readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
4820}
4821
/*
 * msm_pcie_config_device_table - bus_for_each_dev() callback that records
 * a newly enumerated PCI device in both the global table
 * (msm_pcie_dev_tbl) and this RC's per-port table, programs an iATU
 * window for its config space, enables bus mastering on bridges, and
 * enables AER on the endpoint.
 *
 * @dev:  the struct device being visited (must be a pci_dev).
 * @pdev: opaque pointer to the owning msm_pcie_dev_t.
 *
 * Returns 0 on success (including devices on bus 0, which are skipped,
 * and devices already registered); -ENODEV when @dev is not a PCI
 * device; or a positive table index when either table is full.
 */
static int msm_pcie_config_device_table(struct device *dev, void *pdev)
{
	struct pci_dev *pcidev = to_pci_dev(dev);
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
	int ret = 0;
	u32 rc_idx = pcie_dev->rc_idx;
	u32 i, index;
	u32 bdf = 0;
	u8 type;
	u32 h_type;
	u32 bme;

	if (!pcidev) {
		PCIE_ERR(pcie_dev,
			"PCIe: Did not find PCI device in list for RC%d.\n",
			pcie_dev->rc_idx);
		return -ENODEV;
	}

	PCIE_DBG(pcie_dev,
		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
		pcidev->vendor, pcidev->device);

	/* The root bus (bus 0) hosts the RC itself; nothing to record. */
	if (!pcidev->bus->number)
		return ret;

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
	/* Bus 1 is directly below the RC: use Type0 config cycles;
	 * anything deeper goes through a bridge and needs Type1.
	 */
	type = pcidev->bus->number == 1 ?
		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	/* Find the pre-seeded (bdf set, dev still NULL) global slot. */
	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
		if (msm_pcie_dev_tbl[i].bdf == bdf &&
			!msm_pcie_dev_tbl[i].dev) {
			/* Find the matching per-RC slot and fill both. */
			for (index = 0; index < MAX_DEVICE_NUM; index++) {
				if (dev_table_t[index].bdf == bdf) {
					msm_pcie_dev_tbl[i].dev = pcidev;
					msm_pcie_dev_tbl[i].domain = rc_idx;
					msm_pcie_dev_tbl[i].conf_base =
						pcie_dev->conf + index * SZ_4K;
					msm_pcie_dev_tbl[i].phy_address =
						axi_conf->start + index * SZ_4K;

					dev_table_t[index].dev = pcidev;
					dev_table_t[index].domain = rc_idx;
					dev_table_t[index].conf_base =
						pcie_dev->conf + index * SZ_4K;
					dev_table_t[index].phy_address =
						axi_conf->start + index * SZ_4K;

					/* 4K config window for this BDF. */
					msm_pcie_iatu_config(pcie_dev, index,
						type,
						dev_table_t[index].phy_address,
						dev_table_t[index].phy_address
						+ SZ_4K - 1,
						bdf);

					h_type = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_HEADER_TYPE);

					bme = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_COMMAND_STATUS);

					/* Bridge: enable memory space + bus
					 * master. Endpoint: count it and mark
					 * it not yet registered.
					 */
					if (h_type & (1 << 16)) {
						pci_write_config_dword(pcidev,
							PCIE20_COMMAND_STATUS,
							bme | 0x06);
					} else {
						pcie_dev->num_ep++;
						dev_table_t[index].registered =
							false;
					}

					if (pcie_dev->num_ep > 1)
						pcie_dev->pending_ep_reg = true;

					msm_pcie_config_ep_aer(pcie_dev,
						&dev_table_t[index]);

					break;
				}
			}
			if (index == MAX_DEVICE_NUM) {
				PCIE_ERR(pcie_dev,
					"RC%d PCI device table is full.\n",
					rc_idx);
				ret = index;
			} else {
				break;
			}
		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
			pcidev == msm_pcie_dev_tbl[i].dev) {
			/* Already recorded on a previous pass. */
			break;
		}
	}
	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"Global PCI device table is full: %d elements.\n",
			i);
		PCIE_ERR(pcie_dev,
			"Bus number is 0x%x\nDevice number is 0x%x\n",
			pcidev->bus->number, pcidev->devfn);
		ret = i;
	}
	return ret;
}
4931
/*
 * msm_pcie_configure_sid - allocate an SMMU stream ID for an endpoint.
 *
 * Walks from @dev up to the root bus to find the owning RC, allocates
 * the next short BDF, programs the PARF BDF-translation register for
 * the endpoint's full BDF, and returns the resulting SID and PCIe
 * domain (RC index) through @sid and @domain.
 *
 * Returns 0 on success or MSM_PCIE_ERROR when the device chain is
 * invalid, the RC has no SMMU, short BDFs are exhausted, or the
 * endpoint's BDF is not in the RC's device table (in which case the
 * short-BDF allocation is rolled back).
 */
int msm_pcie_configure_sid(struct device *dev, u32 *sid, int *domain)
{
	struct pci_dev *pcidev;
	struct msm_pcie_dev_t *pcie_dev;
	struct pci_bus *bus;
	int i;
	u32 bdf;

	if (!dev) {
		pr_err("%s: PCIe: endpoint device passed in is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	pcidev = to_pci_dev(dev);
	if (!pcidev) {
		pr_err("%s: PCIe: PCI device of endpoint is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	bus = pcidev->bus;
	if (!bus) {
		pr_err("%s: PCIe: Bus of PCI device is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	/* Climb to the root bus; its sysdata is the owning RC. */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	pcie_dev = (struct msm_pcie_dev_t *)(bus->sysdata);
	if (!pcie_dev) {
		pr_err("%s: PCIe: Could not get PCIe structure\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	if (!pcie_dev->smmu_exist) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC:%d: smmu does not exist\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev, "PCIe: RC%d: device address is: %p\n",
		pcie_dev->rc_idx, dev);
	PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device address is: %p\n",
		pcie_dev->rc_idx, pcidev);

	*domain = pcie_dev->rc_idx;

	/* Claim the next short BDF slot (rolled back below on failure). */
	if (pcie_dev->current_short_bdf < (MAX_SHORT_BDF_NUM - 1)) {
		pcie_dev->current_short_bdf++;
	} else {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: No more short BDF left\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);

	/* Locate the endpoint in the per-RC table; build the SID from the
	 * RC index and the short BDF, and program the PARF translation so
	 * the hardware maps the full BDF to this short BDF.
	 */
	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (pcie_dev->pcidev_table[i].bdf == bdf) {
			*sid = pcie_dev->smmu_sid_base +
				((pcie_dev->rc_idx << 4) |
				pcie_dev->current_short_bdf);

			msm_pcie_write_reg(pcie_dev->parf,
				PCIE20_PARF_BDF_TRANSLATE_N +
				pcie_dev->current_short_bdf * 4,
				bdf >> 16);

			pcie_dev->pcidev_table[i].sid = *sid;
			pcie_dev->pcidev_table[i].short_bdf =
				pcie_dev->current_short_bdf;
			break;
		}
	}

	if (i == MAX_DEVICE_NUM) {
		/* Not found: release the short BDF claimed above. */
		pcie_dev->current_short_bdf--;
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d could not find BDF:%d\n",
			pcie_dev->rc_idx, bdf);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev,
		"PCIe: RC%d: Device: %02x:%02x.%01x received SID %d\n",
		pcie_dev->rc_idx,
		bdf >> 24,
		bdf >> 19 & 0x1f,
		bdf >> 16 & 0x07,
		*sid);

	return 0;
}
5031EXPORT_SYMBOL(msm_pcie_configure_sid);
5032
5033int msm_pcie_enumerate(u32 rc_idx)
5034{
5035 int ret = 0, bus_ret = 0, scan_ret = 0;
5036 struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
5037
5038 mutex_lock(&dev->enumerate_lock);
5039
5040 PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
5041
5042 if (!dev->drv_ready) {
5043 PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
5044 rc_idx);
5045 ret = -EPROBE_DEFER;
5046 goto out;
5047 }
5048
5049 if (!dev->enumerated) {
5050 ret = msm_pcie_enable(dev, PM_ALL);
5051
5052 /* kick start ARM PCI configuration framework */
5053 if (!ret) {
5054 struct pci_dev *pcidev = NULL;
5055 bool found = false;
5056 struct pci_bus *bus;
5057 resource_size_t iobase = 0;
5058 u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
5059 u32 vendor_id = ids & 0xffff;
5060 u32 device_id = (ids & 0xffff0000) >> 16;
5061 LIST_HEAD(res);
5062
5063 PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
5064 vendor_id, device_id);
5065
5066 ret = of_pci_get_host_bridge_resources(
5067 dev->pdev->dev.of_node,
5068 0, 0xff, &res, &iobase);
5069 if (ret) {
5070 PCIE_ERR(dev,
5071 "PCIe: failed to get host bridge resources for RC%d: %d\n",
5072 dev->rc_idx, ret);
5073 goto out;
5074 }
5075
5076 bus = pci_create_root_bus(&dev->pdev->dev, 0,
5077 &msm_pcie_ops,
5078 msm_pcie_setup_sys_data(dev),
5079 &res);
5080 if (!bus) {
5081 PCIE_ERR(dev,
5082 "PCIe: failed to create root bus for RC%d\n",
5083 dev->rc_idx);
5084 ret = -ENOMEM;
5085 goto out;
5086 }
5087
5088 scan_ret = pci_scan_child_bus(bus);
5089 PCIE_DBG(dev,
5090 "PCIe: RC%d: The max subordinate bus number discovered is %d\n",
5091 dev->rc_idx, ret);
5092
5093 msm_pcie_fixup_irqs(dev);
5094 pci_assign_unassigned_bus_resources(bus);
5095 pci_bus_add_devices(bus);
5096
5097 dev->enumerated = true;
5098
5099 msm_pcie_write_mask(dev->dm_core +
5100 PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
5101
5102 if (dev->cpl_timeout && dev->bridge_found)
5103 msm_pcie_write_reg_field(dev->dm_core,
5104 PCIE20_DEVICE_CONTROL2_STATUS2,
5105 0xf, dev->cpl_timeout);
5106
5107 if (dev->shadow_en) {
5108 u32 val = readl_relaxed(dev->dm_core +
5109 PCIE20_COMMAND_STATUS);
5110 PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
5111 val);
5112 dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
5113 }
5114
5115 do {
5116 pcidev = pci_get_device(vendor_id,
5117 device_id, pcidev);
5118 if (pcidev && (&msm_pcie_dev[rc_idx] ==
5119 (struct msm_pcie_dev_t *)
5120 PCIE_BUS_PRIV_DATA(pcidev->bus))) {
5121 msm_pcie_dev[rc_idx].dev = pcidev;
5122 found = true;
5123 PCIE_DBG(&msm_pcie_dev[rc_idx],
5124 "PCI device is found for RC%d\n",
5125 rc_idx);
5126 }
5127 } while (!found && pcidev);
5128
5129 if (!pcidev) {
5130 PCIE_ERR(dev,
5131 "PCIe: Did not find PCI device for RC%d.\n",
5132 dev->rc_idx);
5133 ret = -ENODEV;
5134 goto out;
5135 }
5136
5137 bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
5138 &msm_pcie_config_device_table);
5139
5140 if (bus_ret) {
5141 PCIE_ERR(dev,
5142 "PCIe: Failed to set up device table for RC%d\n",
5143 dev->rc_idx);
5144 ret = -ENODEV;
5145 goto out;
5146 }
5147 } else {
5148 PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
5149 dev->rc_idx);
5150 }
5151 } else {
5152 PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
5153 dev->rc_idx);
5154 }
5155
5156out:
5157 mutex_unlock(&dev->enumerate_lock);
5158
5159 return ret;
5160}
5161EXPORT_SYMBOL(msm_pcie_enumerate);
5162
5163static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
5164 enum msm_pcie_event event)
5165{
5166 if (dev->event_reg && dev->event_reg->callback &&
5167 (dev->event_reg->events & event)) {
5168 struct msm_pcie_notify *notify = &dev->event_reg->notify;
5169
5170 notify->event = event;
5171 notify->user = dev->event_reg->user;
5172 PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
5173 dev->rc_idx, event);
5174 dev->event_reg->callback(notify);
5175
5176 if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
5177 (event == MSM_PCIE_EVENT_LINKDOWN)) {
5178 dev->user_suspend = true;
5179 PCIE_DBG(dev,
5180 "PCIe: Client of RC%d will recover the link later.\n",
5181 dev->rc_idx);
5182 return;
5183 }
5184 } else {
5185 PCIE_DBG2(dev,
5186 "PCIe: Client of RC%d does not have registration for event %d\n",
5187 dev->rc_idx, event);
5188 }
5189}
5190
/*
 * handle_wake_func - workqueue handler scheduled from the WAKE# interrupt.
 *
 * Runs with dev->recovery_lock held for the whole operation. If the RC has
 * not been enumerated yet, performs enumeration and then delivers a LINKUP
 * notification to every registered client; otherwise just logs that
 * enumeration was already done.
 */
static void handle_wake_func(struct work_struct *work)
{
	int i, ret;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
					handle_wake_work);

	PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);

	mutex_lock(&dev->recovery_lock);

	if (!dev->enumerated) {
		PCIE_DBG(dev,
			"PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
			dev->rc_idx);

		ret = msm_pcie_enumerate(dev->rc_idx);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: failed to enable RC%d upon wake request from the device.\n",
				dev->rc_idx);
			goto out;
		}

		if (dev->num_ep > 1) {
			/*
			 * Multi-endpoint port: each endpoint slot has its own
			 * event registration; walk the whole device table and
			 * fire a LINKUP callback for every subscriber.
			 */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg = dev->pcidev_table[i].event_reg;

				if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
					&& dev->event_reg &&
					dev->event_reg->callback &&
					(dev->event_reg->events &
					MSM_PCIE_EVENT_LINKUP)) {
					struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
					notify->event = MSM_PCIE_EVENT_LINKUP;
					notify->user = dev->event_reg->user;
					PCIE_DBG(dev,
						"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
						dev->rc_idx);
					dev->event_reg->callback(notify);
				}
			}
		} else {
			/* Single endpoint: one registration on the RC. */
			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
				dev->event_reg && dev->event_reg->callback &&
				(dev->event_reg->events &
				MSM_PCIE_EVENT_LINKUP)) {
				struct msm_pcie_notify *notify =
					&dev->event_reg->notify;
				notify->event = MSM_PCIE_EVENT_LINKUP;
				notify->user = dev->event_reg->user;
				PCIE_DBG(dev,
					"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
					dev->rc_idx);
				dev->event_reg->callback(notify);
			} else {
				PCIE_DBG(dev,
					"PCIe: Client of RC%d does not have registration for linkup event.\n",
					dev->rc_idx);
			}
		}
		goto out;
	} else {
		PCIE_ERR(dev,
			"PCIe: The enumeration for RC%d has already been done.\n",
			dev->rc_idx);
		goto out;
	}

out:
	mutex_unlock(&dev->recovery_lock);
}
5263
/*
 * handle_aer_irq - Advanced Error Reporting interrupt handler.
 *
 * Reads the root complex AER status registers, updates the RC error
 * counters, then walks up to two error source IDs (correctable and
 * uncorrectable) reported in PCIE20_AER_ERR_SRC_ID_REG, locates the
 * matching endpoint in pcidev_table, and updates/clears the endpoint's
 * AER status in the same way. All status bits are write-1-to-clear.
 * Runs under dev->aer_lock; bails out early while the RC is suspending.
 */
static irqreturn_t handle_aer_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
	int ep_corr_val = 0, ep_uncorr_val = 0;
	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
	u32 ep_dev_ctrlstts_offset = 0;
	int i, j, ep_src_bdf = 0;
	void __iomem *ep_base = NULL;
	unsigned long irqsave_flags;

	PCIE_DBG2(dev,
		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
		dev->rc_idx, irq, dev->rc_corr_counter,
		dev->rc_non_fatal_counter, dev->rc_fatal_counter,
		dev->ep_corr_counter, dev->ep_non_fatal_counter,
		dev->ep_fatal_counter);

	spin_lock_irqsave(&dev->aer_lock, irqsave_flags);

	if (dev->suspending) {
		/* Registers may be inaccessible mid-suspend; do nothing. */
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently suspending.\n",
			dev->rc_idx);
		spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
		return IRQ_HANDLED;
	}

	/* Snapshot RC-side AER and device status registers. */
	uncorr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_UNCORR_ERR_STATUS_REG);
	corr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_CORR_ERR_STATUS_REG);
	rc_err_status = readl_relaxed(dev->dm_core +
				PCIE20_AER_ROOT_ERR_STATUS_REG);
	rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
				PCIE20_CAP_DEVCTRLSTATUS);

	if (uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				uncorr_val);
	/* Throttle correctable-error logging via corr_counter_limit. */
	if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
		PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				corr_val);

	/* Device Status bits 16/17/18: correctable/non-fatal/fatal detected. */
	if ((rc_dev_ctrlstts >> 18) & 0x1)
		dev->rc_fatal_counter++;
	if ((rc_dev_ctrlstts >> 17) & 0x1)
		dev->rc_non_fatal_counter++;
	if ((rc_dev_ctrlstts >> 16) & 0x1)
		dev->rc_corr_counter++;

	/* Write-1-to-clear the three error-detected bits on the RC. */
	msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(18)|BIT(17)|BIT(16));

	if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
		/* Link is down: endpoint config space is unreachable. */
		PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
		goto out;
	}

	/* Pass 0: correctable source (low 16 bits); pass 1: uncorrectable. */
	for (i = 0; i < 2; i++) {
		if (i)
			ep_src_bdf = readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
		else
			ep_src_bdf = (readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;

		if (!ep_src_bdf)
			continue;

		/* Match the reported BDF against the cached device table. */
		for (j = 0; j < MAX_DEVICE_NUM; j++) {
			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
				PCIE_DBG2(dev,
					"PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
					i ? "Uncorrectable" : "Correctable",
					dev->pcidev_table[j].bdf >> 24,
					dev->pcidev_table[j].bdf >> 19 & 0x1f,
					dev->pcidev_table[j].bdf >> 16 & 0x07);
				ep_base = dev->pcidev_table[j].conf_base;
				ep_dev_ctrlstts_offset = dev->
					pcidev_table[j].dev_ctrlstts_offset;
				break;
			}
		}

		if (!ep_base) {
			PCIE_ERR(dev,
				"PCIe: RC%d no endpoint found for reported error\n",
				dev->rc_idx);
			goto out;
		}

		/* Mirror the RC handling on the endpoint's config space. */
		ep_uncorr_val = readl_relaxed(ep_base +
					PCIE20_AER_UNCORR_ERR_STATUS_REG);
		ep_corr_val = readl_relaxed(ep_base +
					PCIE20_AER_CORR_ERR_STATUS_REG);
		ep_dev_ctrlstts = readl_relaxed(ep_base +
					ep_dev_ctrlstts_offset);

		if (ep_uncorr_val)
			PCIE_DBG(dev,
				"EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				ep_uncorr_val);
		if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
			PCIE_DBG(dev,
				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				ep_corr_val);

		if ((ep_dev_ctrlstts >> 18) & 0x1)
			dev->ep_fatal_counter++;
		if ((ep_dev_ctrlstts >> 17) & 0x1)
			dev->ep_non_fatal_counter++;
		if ((ep_dev_ctrlstts >> 16) & 0x1)
			dev->ep_corr_counter++;

		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
					BIT(18)|BIT(17)|BIT(16));

		/* Clear (W1C) the endpoint AER status masks. */
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_UNCORR_ERR_STATUS_REG,
				0x3fff031, 0x3fff031);
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_CORR_ERR_STATUS_REG,
				0xf1c1, 0xf1c1);
	}
out:
	if (((dev->rc_corr_counter < corr_counter_limit) &&
		(dev->ep_corr_counter < corr_counter_limit)) ||
		uncorr_val || ep_uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
				rc_err_status);
	/* Clear (W1C) the RC AER status so the interrupt can re-arm. */
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_UNCORR_ERR_STATUS_REG,
			0x3fff031, 0x3fff031);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_CORR_ERR_STATUS_REG,
			0xf1c1, 0xf1c1);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_ROOT_ERR_STATUS_REG,
			0x7f, 0x7f);

	spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
	return IRQ_HANDLED;
}
5409
5410static irqreturn_t handle_wake_irq(int irq, void *data)
5411{
5412 struct msm_pcie_dev_t *dev = data;
5413 unsigned long irqsave_flags;
5414 int i;
5415
5416 spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);
5417
5418 dev->wake_counter++;
5419 PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
5420 dev->wake_counter, dev->rc_idx);
5421
5422 PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
5423 dev->rc_idx);
5424
5425 if (!dev->enumerated) {
5426 PCIE_DBG(dev, "Start enumeating RC%d\n", dev->rc_idx);
5427 if (dev->ep_wakeirq)
5428 schedule_work(&dev->handle_wake_work);
5429 else
5430 PCIE_DBG(dev,
5431 "wake irq is received but ep_wakeirq is not supported for RC%d.\n",
5432 dev->rc_idx);
5433 } else {
5434 PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
5435 __pm_stay_awake(&dev->ws);
5436 __pm_relax(&dev->ws);
5437
5438 if (dev->num_ep > 1) {
5439 for (i = 0; i < MAX_DEVICE_NUM; i++) {
5440 dev->event_reg =
5441 dev->pcidev_table[i].event_reg;
5442 msm_pcie_notify_client(dev,
5443 MSM_PCIE_EVENT_WAKEUP);
5444 }
5445 } else {
5446 msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
5447 }
5448 }
5449
5450 spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);
5451
5452 return IRQ_HANDLED;
5453}
5454
/*
 * handle_linkdown_irq - interrupt handler for PCIe link-down events.
 *
 * Ignores the event if the link was never up or the port is suspending.
 * Otherwise marks the link disabled, disables register shadowing,
 * optionally panics (debug knob), asserts PERST to hold the endpoint in
 * reset, and notifies registered client(s) with MSM_PCIE_EVENT_LINKDOWN.
 * Runs under dev->linkdown_lock.
 */
static irqreturn_t handle_linkdown_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);

	dev->linkdown_counter++;

	PCIE_DBG(dev,
		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
		dev->linkdown_counter, dev->rc_idx);

	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
		/* Spurious: link was never brought up. */
		PCIE_DBG(dev,
			"PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
			dev->rc_idx);
	} else if (dev->suspending) {
		/* Expected while the link is being taken down for suspend. */
		PCIE_DBG(dev,
			"PCIe:the link of RC%d is suspending.\n",
			dev->rc_idx);
	} else {
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		/* Shadowed register restore is invalid after a link loss. */
		dev->shadow_en = false;

		if (dev->linkdown_panic)
			panic("User has chosen to panic on linkdown\n");

		/* assert PERST */
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

		if (dev->num_ep > 1) {
			/* Notify every endpoint's registration slot. */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_LINKDOWN);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
		}
	}

	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);

	return IRQ_HANDLED;
}
5505
5506static irqreturn_t handle_msi_irq(int irq, void *data)
5507{
5508 int i, j;
5509 unsigned long val;
5510 struct msm_pcie_dev_t *dev = data;
5511 void __iomem *ctrl_status;
5512
5513 PCIE_DUMP(dev, "irq: %d\n", irq);
5514
5515 /*
5516 * check for set bits, clear it by setting that bit
5517 * and trigger corresponding irq
5518 */
5519 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
5520 ctrl_status = dev->dm_core +
5521 PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);
5522
5523 val = readl_relaxed(ctrl_status);
5524 while (val) {
5525 j = find_first_bit(&val, 32);
5526 writel_relaxed(BIT(j), ctrl_status);
5527 /* ensure that interrupt is cleared (acked) */
5528 wmb();
5529 generic_handle_irq(
5530 irq_find_mapping(dev->irq_domain, (j + (32*i)))
5531 );
5532 val = readl_relaxed(ctrl_status);
5533 }
5534 }
5535
5536 return IRQ_HANDLED;
5537}
5538
5539static irqreturn_t handle_global_irq(int irq, void *data)
5540{
5541 int i;
5542 struct msm_pcie_dev_t *dev = data;
5543 unsigned long irqsave_flags;
5544 u32 status = 0;
5545
5546 spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);
5547
5548 status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
5549 readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);
5550
5551 msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);
5552
5553 PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
5554 dev->rc_idx, irq, status);
5555
5556 for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
5557 if (status & BIT(i)) {
5558 switch (i) {
5559 case MSM_PCIE_INT_EVT_LINK_DOWN:
5560 PCIE_DBG(dev,
5561 "PCIe: RC%d: handle linkdown event.\n",
5562 dev->rc_idx);
5563 handle_linkdown_irq(irq, data);
5564 break;
5565 case MSM_PCIE_INT_EVT_AER_LEGACY:
5566 PCIE_DBG(dev,
5567 "PCIe: RC%d: AER legacy event.\n",
5568 dev->rc_idx);
5569 handle_aer_irq(irq, data);
5570 break;
5571 case MSM_PCIE_INT_EVT_AER_ERR:
5572 PCIE_DBG(dev,
5573 "PCIe: RC%d: AER event.\n",
5574 dev->rc_idx);
5575 handle_aer_irq(irq, data);
5576 break;
5577 default:
5578 PCIE_ERR(dev,
5579 "PCIe: RC%d: Unexpected event %d is caught!\n",
5580 dev->rc_idx, i);
5581 }
5582 }
5583 }
5584
5585 spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);
5586
5587 return IRQ_HANDLED;
5588}
5589
5590void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev)
5591{
5592 int pos, i;
5593 struct msm_pcie_dev_t *dev;
5594
5595 if (pcie_dev)
5596 dev = pcie_dev;
5597 else
5598 dev = irq_get_chip_data(irq);
5599
5600 if (!dev) {
5601 pr_err("PCIe: device is null. IRQ:%d\n", irq);
5602 return;
5603 }
5604
5605 if (dev->msi_gicm_addr) {
5606 PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
5607
5608 for (i = 0; i < MSM_PCIE_MAX_MSI; i++)
5609 if (irq == dev->msi[i].num)
5610 break;
5611 if (i == MSM_PCIE_MAX_MSI) {
5612 PCIE_ERR(dev,
5613 "Could not find irq: %d in RC%d MSI table\n",
5614 irq, dev->rc_idx);
5615 return;
5616 }
5617
5618 pos = i;
5619 } else {
5620 PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
5621 pos = irq - irq_find_mapping(dev->irq_domain, 0);
5622 }
5623
5624 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5625
5626 PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
5627 pos, *dev->msi_irq_in_use);
5628 clear_bit(pos, dev->msi_irq_in_use);
5629 PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
5630 pos, *dev->msi_irq_in_use);
5631}
5632
/* hookup to linux pci msi framework */
/*
 * arch_teardown_msi_irq - PCI core hook to free one MSI vector.
 * Controller is resolved from the IRQ's chip data (NULL second arg).
 */
void arch_teardown_msi_irq(unsigned int irq)
{
	PCIE_GEN_DBG("irq %d deallocated\n", irq);
	msm_pcie_destroy_irq(irq, NULL);
}
5639
5640void arch_teardown_msi_irqs(struct pci_dev *dev)
5641{
5642 struct msi_desc *entry;
5643 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5644
5645 PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
5646 pcie_dev->rc_idx, dev->vendor, dev->device);
5647
5648 pcie_dev->use_msi = false;
5649
5650 list_for_each_entry(entry, &dev->dev.msi_list, list) {
5651 int i, nvec;
5652
5653 if (entry->irq == 0)
5654 continue;
5655 nvec = 1 << entry->msi_attrib.multiple;
5656 for (i = 0; i < nvec; i++)
5657 msm_pcie_destroy_irq(entry->irq + i, pcie_dev);
5658 }
5659}
5660
/* No-op ack: MSI status bits are cleared directly in handle_msi_irq(). */
static void msm_pcie_msi_nop(struct irq_data *d)
{
}
5664
/*
 * irq_chip for virtual MSI interrupts: ack is a no-op (hardware status
 * is cleared in the demux handler); mask/unmask delegate to the generic
 * PCI MSI mask helpers.
 */
static struct irq_chip pcie_msi_chip = {
	.name = "msm-pcie-msi",
	.irq_ack = msm_pcie_msi_nop,
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
5673
5674static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
5675{
5676 int irq, pos;
5677
5678 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5679
5680again:
5681 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5682
5683 if (pos >= PCIE_MSI_NR_IRQS)
5684 return -ENOSPC;
5685
5686 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5687
5688 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5689 goto again;
5690 else
5691 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5692
5693 irq = irq_create_mapping(dev->irq_domain, pos);
5694 if (!irq)
5695 return -EINVAL;
5696
5697 return irq;
5698}
5699
5700static int arch_setup_msi_irq_default(struct pci_dev *pdev,
5701 struct msi_desc *desc, int nvec)
5702{
5703 int irq;
5704 struct msi_msg msg;
5705 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5706
5707 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5708
5709 irq = msm_pcie_create_irq(dev);
5710
5711 PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);
5712
5713 if (irq < 0)
5714 return irq;
5715
5716 PCIE_DBG(dev, "irq %d allocated\n", irq);
5717
5718 irq_set_msi_desc(irq, desc);
5719
5720 /* write msi vector and data */
5721 msg.address_hi = 0;
5722 msg.address_lo = MSM_PCIE_MSI_PHY;
5723 msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
5724 write_msi_msg(irq, &msg);
5725
5726 return 0;
5727}
5728
5729static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
5730{
5731 int irq, pos;
5732
5733 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5734
5735again:
5736 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5737
5738 if (pos >= PCIE_MSI_NR_IRQS)
5739 return -ENOSPC;
5740
5741 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5742
5743 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5744 goto again;
5745 else
5746 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5747
5748 if (pos >= MSM_PCIE_MAX_MSI) {
5749 PCIE_ERR(dev,
5750 "PCIe: RC%d: pos %d is not less than %d\n",
5751 dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
5752 return MSM_PCIE_ERROR;
5753 }
5754
5755 irq = dev->msi[pos].num;
5756 if (!irq) {
5757 PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
5758 dev->rc_idx);
5759 return -EINVAL;
5760 }
5761
5762 return irq;
5763}
5764
/*
 * arch_setup_msi_irq_qgic - set up @nvec QGIC-routed MSI vectors.
 *
 * Allocates nvec consecutive-slot vectors, configures each as edge
 * rising, binds the MSI descriptor to the first one, and programs the
 * message with the GIC doorbell address and the base SPI offset.
 *
 * NOTE(review): if msm_pcie_create_irq_qgic() fails partway through the
 * loop, vectors allocated in earlier iterations are not released —
 * looks like a leak on the error path; verify against callers.
 */
static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq, index, firstirq = 0;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	for (index = 0; index < nvec; index++) {
		irq = msm_pcie_create_irq_qgic(dev);
		PCIE_DBG(dev, "irq %d is allocated\n", irq);

		if (irq < 0)
			return irq;

		if (index == 0)
			firstirq = irq;

		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
	}

	/* write msi vector and data */
	irq_set_msi_desc(firstirq, desc);
	msg.address_hi = 0;
	msg.address_lo = dev->msi_gicm_addr;
	/* data = GIC SPI base plus the offset of firstirq in the MSI table */
	msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
	write_msi_msg(firstirq, &msg);

	return 0;
}
5796
5797int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
5798{
5799 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5800
5801 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5802
5803 if (dev->msi_gicm_addr)
5804 return arch_setup_msi_irq_qgic(pdev, desc, 1);
5805 else
5806 return arch_setup_msi_irq_default(pdev, desc, 1);
5807}
5808
/*
 * msm_pcie_get_msi_multiple - floor(log2(nvec)) for the MSI "multiple"
 * field. E.g. nvec=1 -> 0, nvec=32 -> 5. (Returns -1 for nvec == 0.)
 */
static int msm_pcie_get_msi_multiple(int nvec)
{
	int log2_nvec = -1;

	for (; nvec; nvec >>= 1)
		log2_nvec++;

	PCIE_GEN_DBG("log2 number of MSI multiple:%d\n",
		log2_nvec);

	return log2_nvec;
}
5822
/*
 * arch_setup_msi_irqs - PCI core hook to set up @nvec MSI vectors.
 *
 * Only plain MSI (not MSI-X) with at most 32 vectors is supported.
 * Records log2(nvec) in each descriptor's "multiple" field, then routes
 * to the QGIC or default setup path per msi_gicm_addr. On full success
 * marks MSI in use on the RC.
 *
 * Return: 0 on success, negative error, or -ENOSPC when the setup path
 * reports a positive (partial-allocation) result.
 */
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);

	if (type != PCI_CAP_ID_MSI || nvec > 32)
		return -ENOSPC;

	PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);

	list_for_each_entry(entry, &dev->dev.msi_list, list) {
		/* multiple = log2 of the requested vector count */
		entry->msi_attrib.multiple =
			msm_pcie_get_msi_multiple(nvec);

		if (pcie_dev->msi_gicm_addr)
			ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
		else
			ret = arch_setup_msi_irq_default(dev, entry, nvec);

		PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);

		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	pcie_dev->use_msi = true;

	return 0;
}
5857
5858static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
5859 irq_hw_number_t hwirq)
5860{
5861 irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
5862 irq_set_chip_data(irq, domain->host_data);
5863 return 0;
5864}
5865
/* irq_domain ops for the MSI domain; only .map is needed. */
static const struct irq_domain_ops msm_pcie_msi_ops = {
	.map = msm_pcie_msi_map,
};
5869
5870int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
5871{
5872 int rc;
5873 int msi_start = 0;
5874 struct device *pdev = &dev->pdev->dev;
5875
5876 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5877
5878 if (dev->rc_idx)
5879 wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
5880 else
5881 wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
5882
5883 /* register handler for linkdown interrupt */
5884 if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
5885 rc = devm_request_irq(pdev,
5886 dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
5887 handle_linkdown_irq,
5888 IRQF_TRIGGER_RISING,
5889 dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
5890 dev);
5891 if (rc) {
5892 PCIE_ERR(dev,
5893 "PCIe: Unable to request linkdown interrupt:%d\n",
5894 dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
5895 return rc;
5896 }
5897 }
5898
5899 /* register handler for physical MSI interrupt line */
5900 if (dev->irq[MSM_PCIE_INT_MSI].num) {
5901 rc = devm_request_irq(pdev,
5902 dev->irq[MSM_PCIE_INT_MSI].num,
5903 handle_msi_irq,
5904 IRQF_TRIGGER_RISING,
5905 dev->irq[MSM_PCIE_INT_MSI].name,
5906 dev);
5907 if (rc) {
5908 PCIE_ERR(dev,
5909 "PCIe: RC%d: Unable to request MSI interrupt\n",
5910 dev->rc_idx);
5911 return rc;
5912 }
5913 }
5914
5915 /* register handler for AER interrupt */
5916 if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
5917 rc = devm_request_irq(pdev,
5918 dev->irq[MSM_PCIE_INT_PLS_ERR].num,
5919 handle_aer_irq,
5920 IRQF_TRIGGER_RISING,
5921 dev->irq[MSM_PCIE_INT_PLS_ERR].name,
5922 dev);
5923 if (rc) {
5924 PCIE_ERR(dev,
5925 "PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
5926 dev->rc_idx,
5927 dev->irq[MSM_PCIE_INT_PLS_ERR].num);
5928 return rc;
5929 }
5930 }
5931
5932 /* register handler for AER legacy interrupt */
5933 if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
5934 rc = devm_request_irq(pdev,
5935 dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
5936 handle_aer_irq,
5937 IRQF_TRIGGER_RISING,
5938 dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
5939 dev);
5940 if (rc) {
5941 PCIE_ERR(dev,
5942 "PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
5943 dev->rc_idx,
5944 dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
5945 return rc;
5946 }
5947 }
5948
5949 if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
5950 rc = devm_request_irq(pdev,
5951 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
5952 handle_global_irq,
5953 IRQF_TRIGGER_RISING,
5954 dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
5955 dev);
5956 if (rc) {
5957 PCIE_ERR(dev,
5958 "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
5959 dev->rc_idx,
5960 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
5961 return rc;
5962 }
5963 }
5964
5965 /* register handler for PCIE_WAKE_N interrupt line */
5966 if (dev->wake_n) {
5967 rc = devm_request_irq(pdev,
5968 dev->wake_n, handle_wake_irq,
5969 IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
5970 if (rc) {
5971 PCIE_ERR(dev,
5972 "PCIe: RC%d: Unable to request wake interrupt\n",
5973 dev->rc_idx);
5974 return rc;
5975 }
5976
5977 INIT_WORK(&dev->handle_wake_work, handle_wake_func);
5978
5979 rc = enable_irq_wake(dev->wake_n);
5980 if (rc) {
5981 PCIE_ERR(dev,
5982 "PCIe: RC%d: Unable to enable wake interrupt\n",
5983 dev->rc_idx);
5984 return rc;
5985 }
5986 }
5987
5988 /* Create a virtual domain of interrupts */
5989 if (!dev->msi_gicm_addr) {
5990 dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
5991 PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
5992
5993 if (!dev->irq_domain) {
5994 PCIE_ERR(dev,
5995 "PCIe: RC%d: Unable to initialize irq domain\n",
5996 dev->rc_idx);
5997
5998 if (dev->wake_n)
5999 disable_irq(dev->wake_n);
6000
6001 return PTR_ERR(dev->irq_domain);
6002 }
6003
6004 msi_start = irq_create_mapping(dev->irq_domain, 0);
6005 }
6006
6007 return 0;
6008}
6009
/*
 * msm_pcie_irq_deinit - undo msm_pcie_irq_init.
 * Drops the wakeup source and disables the WAKE# interrupt if wired;
 * the devm-requested IRQs are released with the platform device.
 */
void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	wakeup_source_trash(&dev->ws);

	if (dev->wake_n)
		disable_irq(dev->wake_n);
}
6019
6020
6021static int msm_pcie_probe(struct platform_device *pdev)
6022{
6023 int ret = 0;
6024 int rc_idx = -1;
6025 int i, j;
6026
6027 PCIE_GEN_DBG("%s\n", __func__);
6028
6029 mutex_lock(&pcie_drv.drv_lock);
6030
6031 ret = of_property_read_u32((&pdev->dev)->of_node,
6032 "cell-index", &rc_idx);
6033 if (ret) {
6034 PCIE_GEN_DBG("Did not find RC index.\n");
6035 goto out;
6036 } else {
6037 if (rc_idx >= MAX_RC_NUM) {
6038 pr_err(
6039 "PCIe: Invalid RC Index %d (max supported = %d)\n",
6040 rc_idx, MAX_RC_NUM);
6041 goto out;
6042 }
6043 pcie_drv.rc_num++;
6044 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
6045 rc_idx);
6046 }
6047
6048 msm_pcie_dev[rc_idx].l0s_supported =
6049 of_property_read_bool((&pdev->dev)->of_node,
6050 "qcom,l0s-supported");
6051 PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
6052 msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
6053 msm_pcie_dev[rc_idx].l1_supported =
6054 of_property_read_bool((&pdev->dev)->of_node,
6055 "qcom,l1-supported");
6056 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
6057 msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
6058 msm_pcie_dev[rc_idx].l1ss_supported =
6059 of_property_read_bool((&pdev->dev)->of_node,
6060 "qcom,l1ss-supported");
6061 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
6062 msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
6063 msm_pcie_dev[rc_idx].common_clk_en =
6064 of_property_read_bool((&pdev->dev)->of_node,
6065 "qcom,common-clk-en");
6066 PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
6067 msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
6068 msm_pcie_dev[rc_idx].clk_power_manage_en =
6069 of_property_read_bool((&pdev->dev)->of_node,
6070 "qcom,clk-power-manage-en");
6071 PCIE_DBG(&msm_pcie_dev[rc_idx],
6072 "Clock power management is %s enabled.\n",
6073 msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
6074 msm_pcie_dev[rc_idx].aux_clk_sync =
6075 of_property_read_bool((&pdev->dev)->of_node,
6076 "qcom,aux-clk-sync");
6077 PCIE_DBG(&msm_pcie_dev[rc_idx],
6078 "AUX clock is %s synchronous to Core clock.\n",
6079 msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
6080
6081 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
6082 of_property_read_bool((&pdev->dev)->of_node,
6083 "qcom,use-19p2mhz-aux-clk");
6084 PCIE_DBG(&msm_pcie_dev[rc_idx],
6085 "AUX clock frequency is %s 19.2MHz.\n",
6086 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
6087
6088 msm_pcie_dev[rc_idx].smmu_exist =
6089 of_property_read_bool((&pdev->dev)->of_node,
6090 "qcom,smmu-exist");
6091 PCIE_DBG(&msm_pcie_dev[rc_idx],
6092 "SMMU does %s exist.\n",
6093 msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
6094
6095 msm_pcie_dev[rc_idx].smmu_sid_base = 0;
6096 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
6097 &msm_pcie_dev[rc_idx].smmu_sid_base);
6098 if (ret)
6099 PCIE_DBG(&msm_pcie_dev[rc_idx],
6100 "RC%d SMMU sid base not found\n",
6101 msm_pcie_dev[rc_idx].rc_idx);
6102 else
6103 PCIE_DBG(&msm_pcie_dev[rc_idx],
6104 "RC%d: qcom,smmu-sid-base: 0x%x.\n",
6105 msm_pcie_dev[rc_idx].rc_idx,
6106 msm_pcie_dev[rc_idx].smmu_sid_base);
6107
6108 msm_pcie_dev[rc_idx].ep_wakeirq =
6109 of_property_read_bool((&pdev->dev)->of_node,
6110 "qcom,ep-wakeirq");
6111 PCIE_DBG(&msm_pcie_dev[rc_idx],
6112 "PCIe: EP of RC%d does %s assert wake when it is up.\n",
6113 rc_idx, msm_pcie_dev[rc_idx].ep_wakeirq ? "" : "not");
6114
6115 msm_pcie_dev[rc_idx].phy_ver = 1;
6116 ret = of_property_read_u32((&pdev->dev)->of_node,
6117 "qcom,pcie-phy-ver",
6118 &msm_pcie_dev[rc_idx].phy_ver);
6119 if (ret)
6120 PCIE_DBG(&msm_pcie_dev[rc_idx],
6121 "RC%d: pcie-phy-ver does not exist.\n",
6122 msm_pcie_dev[rc_idx].rc_idx);
6123 else
6124 PCIE_DBG(&msm_pcie_dev[rc_idx],
6125 "RC%d: pcie-phy-ver: %d.\n",
6126 msm_pcie_dev[rc_idx].rc_idx,
6127 msm_pcie_dev[rc_idx].phy_ver);
6128
6129 msm_pcie_dev[rc_idx].n_fts = 0;
6130 ret = of_property_read_u32((&pdev->dev)->of_node,
6131 "qcom,n-fts",
6132 &msm_pcie_dev[rc_idx].n_fts);
6133
6134 if (ret)
6135 PCIE_DBG(&msm_pcie_dev[rc_idx],
6136 "n-fts does not exist. ret=%d\n", ret);
6137 else
6138 PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
6139 msm_pcie_dev[rc_idx].n_fts);
6140
6141 msm_pcie_dev[rc_idx].common_phy =
6142 of_property_read_bool((&pdev->dev)->of_node,
6143 "qcom,common-phy");
6144 PCIE_DBG(&msm_pcie_dev[rc_idx],
6145 "PCIe: RC%d: Common PHY does %s exist.\n",
6146 rc_idx, msm_pcie_dev[rc_idx].common_phy ? "" : "not");
6147
6148 msm_pcie_dev[rc_idx].ext_ref_clk =
6149 of_property_read_bool((&pdev->dev)->of_node,
6150 "qcom,ext-ref-clk");
6151 PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
6152 msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
6153
6154 msm_pcie_dev[rc_idx].ep_latency = 0;
6155 ret = of_property_read_u32((&pdev->dev)->of_node,
6156 "qcom,ep-latency",
6157 &msm_pcie_dev[rc_idx].ep_latency);
6158 if (ret)
6159 PCIE_DBG(&msm_pcie_dev[rc_idx],
6160 "RC%d: ep-latency does not exist.\n",
6161 rc_idx);
6162 else
6163 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
6164 rc_idx, msm_pcie_dev[rc_idx].ep_latency);
6165
6166 msm_pcie_dev[rc_idx].wr_halt_size = 0;
6167 ret = of_property_read_u32(pdev->dev.of_node,
6168 "qcom,wr-halt-size",
6169 &msm_pcie_dev[rc_idx].wr_halt_size);
6170 if (ret)
6171 PCIE_DBG(&msm_pcie_dev[rc_idx],
6172 "RC%d: wr-halt-size not specified in dt. Use default value.\n",
6173 rc_idx);
6174 else
6175 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
6176 rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
6177
6178 msm_pcie_dev[rc_idx].cpl_timeout = 0;
6179 ret = of_property_read_u32((&pdev->dev)->of_node,
6180 "qcom,cpl-timeout",
6181 &msm_pcie_dev[rc_idx].cpl_timeout);
6182 if (ret)
6183 PCIE_DBG(&msm_pcie_dev[rc_idx],
6184 "RC%d: Using default cpl-timeout.\n",
6185 rc_idx);
6186 else
6187 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
6188 rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
6189
6190 msm_pcie_dev[rc_idx].perst_delay_us_min =
6191 PERST_PROPAGATION_DELAY_US_MIN;
6192 ret = of_property_read_u32(pdev->dev.of_node,
6193 "qcom,perst-delay-us-min",
6194 &msm_pcie_dev[rc_idx].perst_delay_us_min);
6195 if (ret)
6196 PCIE_DBG(&msm_pcie_dev[rc_idx],
6197 "RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
6198 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
6199 else
6200 PCIE_DBG(&msm_pcie_dev[rc_idx],
6201 "RC%d: perst-delay-us-min: %dus.\n",
6202 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
6203
6204 msm_pcie_dev[rc_idx].perst_delay_us_max =
6205 PERST_PROPAGATION_DELAY_US_MAX;
6206 ret = of_property_read_u32(pdev->dev.of_node,
6207 "qcom,perst-delay-us-max",
6208 &msm_pcie_dev[rc_idx].perst_delay_us_max);
6209 if (ret)
6210 PCIE_DBG(&msm_pcie_dev[rc_idx],
6211 "RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
6212 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
6213 else
6214 PCIE_DBG(&msm_pcie_dev[rc_idx],
6215 "RC%d: perst-delay-us-max: %dus.\n",
6216 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
6217
6218 msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
6219 ret = of_property_read_u32(pdev->dev.of_node,
6220 "qcom,tlp-rd-size",
6221 &msm_pcie_dev[rc_idx].tlp_rd_size);
6222 if (ret)
6223 PCIE_DBG(&msm_pcie_dev[rc_idx],
6224 "RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
6225 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
6226 else
6227 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
6228 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
6229
6230 msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
6231 msm_pcie_dev[rc_idx].msi_gicm_base = 0;
6232 ret = of_property_read_u32((&pdev->dev)->of_node,
6233 "qcom,msi-gicm-addr",
6234 &msm_pcie_dev[rc_idx].msi_gicm_addr);
6235
6236 if (ret) {
6237 PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
6238 "msi-gicm-addr does not exist.\n");
6239 } else {
6240 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
6241 msm_pcie_dev[rc_idx].msi_gicm_addr);
6242
6243 ret = of_property_read_u32((&pdev->dev)->of_node,
6244 "qcom,msi-gicm-base",
6245 &msm_pcie_dev[rc_idx].msi_gicm_base);
6246
6247 if (ret) {
6248 PCIE_ERR(&msm_pcie_dev[rc_idx],
6249 "PCIe: RC%d: msi-gicm-base does not exist.\n",
6250 rc_idx);
6251 goto decrease_rc_num;
6252 } else {
6253 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
6254 msm_pcie_dev[rc_idx].msi_gicm_base);
6255 }
6256 }
6257
6258 msm_pcie_dev[rc_idx].scm_dev_id = 0;
6259 ret = of_property_read_u32((&pdev->dev)->of_node,
6260 "qcom,scm-dev-id",
6261 &msm_pcie_dev[rc_idx].scm_dev_id);
6262
6263 msm_pcie_dev[rc_idx].rc_idx = rc_idx;
6264 msm_pcie_dev[rc_idx].pdev = pdev;
6265 msm_pcie_dev[rc_idx].vreg_n = 0;
6266 msm_pcie_dev[rc_idx].gpio_n = 0;
6267 msm_pcie_dev[rc_idx].parf_deemph = 0;
6268 msm_pcie_dev[rc_idx].parf_swing = 0;
6269 msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
6270 msm_pcie_dev[rc_idx].user_suspend = false;
6271 msm_pcie_dev[rc_idx].disable_pc = false;
6272 msm_pcie_dev[rc_idx].saved_state = NULL;
6273 msm_pcie_dev[rc_idx].enumerated = false;
6274 msm_pcie_dev[rc_idx].num_active_ep = 0;
6275 msm_pcie_dev[rc_idx].num_ep = 0;
6276 msm_pcie_dev[rc_idx].pending_ep_reg = false;
6277 msm_pcie_dev[rc_idx].phy_len = 0;
6278 msm_pcie_dev[rc_idx].port_phy_len = 0;
6279 msm_pcie_dev[rc_idx].phy_sequence = NULL;
6280 msm_pcie_dev[rc_idx].port_phy_sequence = NULL;
6281 msm_pcie_dev[rc_idx].event_reg = NULL;
6282 msm_pcie_dev[rc_idx].linkdown_counter = 0;
6283 msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
6284 msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
6285 msm_pcie_dev[rc_idx].rc_corr_counter = 0;
6286 msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
6287 msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
6288 msm_pcie_dev[rc_idx].ep_corr_counter = 0;
6289 msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
6290 msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
6291 msm_pcie_dev[rc_idx].suspending = false;
6292 msm_pcie_dev[rc_idx].wake_counter = 0;
6293 msm_pcie_dev[rc_idx].aer_enable = true;
6294 msm_pcie_dev[rc_idx].power_on = false;
6295 msm_pcie_dev[rc_idx].current_short_bdf = 0;
6296 msm_pcie_dev[rc_idx].use_msi = false;
6297 msm_pcie_dev[rc_idx].use_pinctrl = false;
6298 msm_pcie_dev[rc_idx].linkdown_panic = false;
6299 msm_pcie_dev[rc_idx].bridge_found = false;
6300 memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
6301 sizeof(msm_pcie_vreg_info));
6302 memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
6303 sizeof(msm_pcie_gpio_info));
6304 memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
6305 sizeof(msm_pcie_clk_info[rc_idx]));
6306 memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
6307 sizeof(msm_pcie_pipe_clk_info[rc_idx]));
6308 memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
6309 sizeof(msm_pcie_res_info));
6310 memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
6311 sizeof(msm_pcie_irq_info));
6312 memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
6313 sizeof(msm_pcie_msi_info));
6314 memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
6315 sizeof(msm_pcie_reset_info[rc_idx]));
6316 memcpy(msm_pcie_dev[rc_idx].pipe_reset,
6317 msm_pcie_pipe_reset_info[rc_idx],
6318 sizeof(msm_pcie_pipe_reset_info[rc_idx]));
6319 msm_pcie_dev[rc_idx].shadow_en = true;
6320 for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
6321 msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
6322 for (i = 0; i < MAX_DEVICE_NUM; i++)
6323 for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
6324 msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
6325 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6326 msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
6327 msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
6328 msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
6329 msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
6330 msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
6331 msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = 0;
6332 msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
6333 msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
6334 msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
6335 msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
6336 }
6337
Tony Truongbd9a3412017-02-27 18:30:13 -08006338 dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
6339 msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
6340
Tony Truong349ee492014-10-01 17:35:56 -07006341 ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
6342 msm_pcie_dev[rc_idx].pdev);
6343
6344 if (ret)
6345 goto decrease_rc_num;
6346
6347 msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
6348 if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
6349 PCIE_ERR(&msm_pcie_dev[rc_idx],
6350 "PCIe: RC%d failed to get pinctrl\n",
6351 rc_idx);
6352 else
6353 msm_pcie_dev[rc_idx].use_pinctrl = true;
6354
6355 if (msm_pcie_dev[rc_idx].use_pinctrl) {
6356 msm_pcie_dev[rc_idx].pins_default =
6357 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6358 "default");
6359 if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
6360 PCIE_ERR(&msm_pcie_dev[rc_idx],
6361 "PCIe: RC%d could not get pinctrl default state\n",
6362 rc_idx);
6363 msm_pcie_dev[rc_idx].pins_default = NULL;
6364 }
6365
6366 msm_pcie_dev[rc_idx].pins_sleep =
6367 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6368 "sleep");
6369 if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
6370 PCIE_ERR(&msm_pcie_dev[rc_idx],
6371 "PCIe: RC%d could not get pinctrl sleep state\n",
6372 rc_idx);
6373 msm_pcie_dev[rc_idx].pins_sleep = NULL;
6374 }
6375 }
6376
6377 ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
6378 if (ret) {
6379 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6380 goto decrease_rc_num;
6381 }
6382
6383 ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
6384 if (ret) {
6385 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6386 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6387 goto decrease_rc_num;
6388 }
6389
6390 msm_pcie_dev[rc_idx].drv_ready = true;
6391
6392 if (msm_pcie_dev[rc_idx].ep_wakeirq) {
6393 PCIE_DBG(&msm_pcie_dev[rc_idx],
6394 "PCIe: RC%d will be enumerated upon WAKE signal from Endpoint.\n",
6395 rc_idx);
6396 mutex_unlock(&pcie_drv.drv_lock);
6397 return 0;
6398 }
6399
6400 ret = msm_pcie_enumerate(rc_idx);
6401
6402 if (ret)
6403 PCIE_ERR(&msm_pcie_dev[rc_idx],
6404 "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
6405 rc_idx);
6406 else
6407 PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n",
6408 rc_idx);
6409
6410 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n",
6411 dev_name(&(pdev->dev)));
6412
6413 mutex_unlock(&pcie_drv.drv_lock);
6414 return 0;
6415
6416decrease_rc_num:
6417 pcie_drv.rc_num--;
6418out:
6419 if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
6420 pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
6421 rc_idx);
6422 else
6423 PCIE_ERR(&msm_pcie_dev[rc_idx],
6424 "PCIe: Driver probe failed for RC%d:%d\n",
6425 rc_idx, ret);
6426
6427 mutex_unlock(&pcie_drv.drv_lock);
6428
6429 return ret;
6430}
6431
6432static int msm_pcie_remove(struct platform_device *pdev)
6433{
6434 int ret = 0;
6435 int rc_idx;
6436
6437 PCIE_GEN_DBG("PCIe:%s.\n", __func__);
6438
6439 mutex_lock(&pcie_drv.drv_lock);
6440
6441 ret = of_property_read_u32((&pdev->dev)->of_node,
6442 "cell-index", &rc_idx);
6443 if (ret) {
6444 pr_err("%s: Did not find RC index.\n", __func__);
6445 goto out;
6446 } else {
6447 pcie_drv.rc_num--;
6448 PCIE_GEN_DBG("%s: RC index is 0x%x.", __func__, rc_idx);
6449 }
6450
6451 msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
6452 msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
6453 msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
6454 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6455 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6456
6457out:
6458 mutex_unlock(&pcie_drv.drv_lock);
6459
6460 return ret;
6461}
6462
/* Devicetree match table: binds this driver to "qcom,pci-msm" nodes. */
static const struct of_device_id msm_pcie_match[] = {
	{	.compatible = "qcom,pci-msm",
	},
	{}
};
6468
/* Platform driver descriptor; registered from pcie_init(). */
static struct platform_driver msm_pcie_driver = {
	.probe	= msm_pcie_probe,
	.remove	= msm_pcie_remove,
	.driver	= {
		.name		= "pci-msm",
		.owner		= THIS_MODULE,
		.of_match_table	= msm_pcie_match,
	},
};
6478
/*
 * pcie_init - driver entry point (subsys_initcall_sync).
 *
 * For every possible root complex, creates three IPC logging contexts
 * ("short", "long" and "dump" logs), initializes per-RC locks and flags,
 * clears the global endpoint device table, sets up debugfs, and finally
 * registers the platform driver (which triggers probe for matching
 * devicetree nodes).
 *
 * Return: 0 on success or the platform_driver_register() error code.
 */
int __init pcie_init(void)
{
	int ret = 0, i;
	char rc_name[MAX_RC_NAME_LEN];

	pr_alert("pcie:%s.\n", __func__);

	pcie_drv.rc_num = 0;
	mutex_init(&pcie_drv.drv_lock);
	mutex_init(&com_phy_lock);

	for (i = 0; i < MAX_RC_NUM; i++) {
		/* "short" IPC log: terse per-RC event log. */
		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
		msm_pcie_dev[i].ipc_log =
			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
		if (msm_pcie_dev[i].ipc_log == NULL)
			pr_err("%s: unable to create IPC log context for %s\n",
				__func__, rc_name);
		else
			PCIE_DBG(&msm_pcie_dev[i],
				"PCIe IPC logging is enable for RC%d\n",
				i);
		/* "long" IPC log: verbose debug trail. */
		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
		msm_pcie_dev[i].ipc_log_long =
			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
		if (msm_pcie_dev[i].ipc_log_long == NULL)
			pr_err("%s: unable to create IPC log context for %s\n",
				__func__, rc_name);
		else
			PCIE_DBG(&msm_pcie_dev[i],
				"PCIe IPC logging %s is enable for RC%d\n",
				rc_name, i);
		/* "dump" IPC log: register-dump output. */
		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
		msm_pcie_dev[i].ipc_log_dump =
			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
		if (msm_pcie_dev[i].ipc_log_dump == NULL)
			pr_err("%s: unable to create IPC log context for %s\n",
				__func__, rc_name);
		else
			PCIE_DBG(&msm_pcie_dev[i],
				"PCIe IPC logging %s is enable for RC%d\n",
				rc_name, i);
		/* Per-RC synchronization primitives and default state. */
		spin_lock_init(&msm_pcie_dev[i].cfg_lock);
		msm_pcie_dev[i].cfg_access = true;
		mutex_init(&msm_pcie_dev[i].enumerate_lock);
		mutex_init(&msm_pcie_dev[i].setup_lock);
		mutex_init(&msm_pcie_dev[i].recovery_lock);
		spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
		spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
		spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
		spin_lock_init(&msm_pcie_dev[i].aer_lock);
		/* Cleared until probe completes; pm_control checks this. */
		msm_pcie_dev[i].drv_ready = false;
	}
	/* Reset the global endpoint bookkeeping table. */
	for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
		msm_pcie_dev_tbl[i].bdf = 0;
		msm_pcie_dev_tbl[i].dev = NULL;
		msm_pcie_dev_tbl[i].short_bdf = 0;
		msm_pcie_dev_tbl[i].sid = 0;
		msm_pcie_dev_tbl[i].domain = -1;
		msm_pcie_dev_tbl[i].conf_base = 0;
		msm_pcie_dev_tbl[i].phy_address = 0;
		msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
		msm_pcie_dev_tbl[i].event_reg = NULL;
		msm_pcie_dev_tbl[i].registered = true;
	}

	msm_pcie_debugfs_init();

	ret = platform_driver_register(&msm_pcie_driver);

	return ret;
}
6551
6552static void __exit pcie_exit(void)
6553{
Tony Truongbd9a3412017-02-27 18:30:13 -08006554 int i;
6555
Tony Truong349ee492014-10-01 17:35:56 -07006556 PCIE_GEN_DBG("pcie:%s.\n", __func__);
6557
6558 platform_driver_unregister(&msm_pcie_driver);
6559
6560 msm_pcie_debugfs_exit();
Tony Truongbd9a3412017-02-27 18:30:13 -08006561
6562 for (i = 0; i < MAX_RC_NUM; i++)
6563 msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
Tony Truong349ee492014-10-01 17:35:56 -07006564}
6565
6566subsys_initcall_sync(pcie_init);
6567module_exit(pcie_exit);
6568
6569
6570/* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
6571static void msm_pcie_fixup_early(struct pci_dev *dev)
6572{
6573 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6574
6575 PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
6576 if (dev->hdr_type == 1)
6577 dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
6578}
6579DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6580 msm_pcie_fixup_early);
6581
/* Suspend the PCIe link */
/*
 * msm_pcie_pm_suspend - put the link into L23 and power the RC down.
 * @dev:     root-complex pci_dev whose bus carries the driver private data
 * @user:    opaque client cookie (unused here)
 * @data:    opaque client data (unused here)
 * @options: MSM_PCIE_CONFIG_* flags; NO_CFG_RESTORE skips state saving
 *
 * Sequence: mark the RC as suspending, save config space (unless the
 * caller opted out), block further config access, request link turn-off
 * via the ELBI register, poll for L23_Ready, then drop clocks/regulators
 * and switch pinctrl to the sleep state.
 *
 * Return: 0 on success, or the pci_save_state() error code.
 */
static int msm_pcie_pm_suspend(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret = 0;
	u32 val = 0;
	int ret_l23;
	unsigned long irqsave_flags;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* aer_lock: the AER handler checks `suspending` to skip recovery. */
	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
	pcie_dev->suspending = true;
	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);

	if (!pcie_dev->power_on) {
		PCIE_DBG(pcie_dev,
			"PCIe: power of RC%d has been turned off.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	/* Save config space only while the link is confirmed up. */
	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
			pcie_dev->conf)) {
		ret = pci_save_state(dev);
		pcie_dev->saved_state =	pci_store_saved_state(dev);
	}
	if (ret) {
		PCIE_ERR(pcie_dev, "PCIe: fail to save state of RC%d:%d.\n",
			pcie_dev->rc_idx, ret);
		pcie_dev->suspending = false;
		return ret;
	}

	/* Block config-space accessors for the duration of the suspend. */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = false;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* Setting BIT(4) in ELBI_SYS_CTRL sends PME_TURNOFF_MSG (see log). */
	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
				BIT(4));

	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
		pcie_dev->rc_idx);

	/* Poll up to 100ms (10us step) for L23_Ready (BIT(5)) in PM_STTS. */
	ret_l23 = readl_poll_timeout((pcie_dev->parf
		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);

	/* check L23_Ready */
	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
		pcie_dev->rc_idx,
		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
	if (!ret_l23)
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
			pcie_dev->rc_idx);

	/* Power down regardless of L23 result (best effort). */
	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_sleep);

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6654
/*
 * msm_pcie_fixup_suspend - PCI suspend fixup for the MSM root complex.
 *
 * Skips the suspend when the link is not enabled or when a client has
 * requested "always alive" via MSM_PCIE_DISABLE_PC (disable_pc flag);
 * otherwise suspends the link under the recovery lock.
 */
static void msm_pcie_fixup_suspend(struct pci_dev *dev)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);

	if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
		return;

	/* disable_pc is written under cfg_lock by msm_pcie_pm_control(). */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	if (pcie_dev->disable_pc) {
		PCIE_DBG(pcie_dev,
			"RC%d: Skip suspend because of user request\n",
			pcie_dev->rc_idx);
		spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
		return;
	}
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* Serialize with link-recovery and client suspend/resume paths. */
	mutex_lock(&pcie_dev->recovery_lock);

	ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
	if (ret)
		PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
			pcie_dev->rc_idx, ret);

	mutex_unlock(&pcie_dev->recovery_lock);
}
DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
			  msm_pcie_fixup_suspend);
6689
/* Resume the PCIe link */
/*
 * msm_pcie_pm_resume - power the RC back up and retrain the link.
 * @dev:     root-complex pci_dev whose bus carries the driver private data
 * @user:    opaque client cookie (unused here)
 * @data:    opaque client data (unused here)
 * @options: MSM_PCIE_CONFIG_* flags; NO_CFG_RESTORE skips state restore
 *
 * Sequence: restore default pinctrl, re-enable config access, power up
 * clocks/regulators via msm_pcie_enable(), then restore the config space
 * saved at suspend time and (if a bridge was found at enumeration)
 * recover the shadowed config of RC and EP.
 *
 * Return: 0 on success, or the msm_pcie_enable() error code.
 */
static int msm_pcie_pm_resume(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_default);

	/* Re-open config-space access that suspend blocked. */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = true;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
	if (ret) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d fail to enable PCIe link in resume.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	pcie_dev->suspending = false;
	PCIE_DBG(pcie_dev,
		"dev->bus->number = %d dev->bus->primary = %d\n",
		 dev->bus->number, dev->bus->primary);

	if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCI framework restore state\n",
			pcie_dev->rc_idx);

		/* Consumes saved_state captured in msm_pcie_pm_suspend(). */
		pci_load_and_free_saved_state(dev,
					&pcie_dev->saved_state);
		pci_restore_state(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCI framework restore state\n",
			pcie_dev->rc_idx);
	}

	if (pcie_dev->bridge_found) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCIe recover config\n",
			pcie_dev->rc_idx);

		msm_pcie_recover_config(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCIe recover config\n",
			pcie_dev->rc_idx);
	}

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6752
6753void msm_pcie_fixup_resume(struct pci_dev *dev)
6754{
6755 int ret;
6756 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6757
6758 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6759
6760 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6761 pcie_dev->user_suspend)
6762 return;
6763
6764 mutex_lock(&pcie_dev->recovery_lock);
6765 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6766 if (ret)
6767 PCIE_ERR(pcie_dev,
6768 "PCIe: RC%d got failure in fixup resume:%d.\n",
6769 pcie_dev->rc_idx, ret);
6770
6771 mutex_unlock(&pcie_dev->recovery_lock);
6772}
6773DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6774 msm_pcie_fixup_resume);
6775
6776void msm_pcie_fixup_resume_early(struct pci_dev *dev)
6777{
6778 int ret;
6779 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6780
6781 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6782
6783 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6784 pcie_dev->user_suspend)
6785 return;
6786
6787 mutex_lock(&pcie_dev->recovery_lock);
6788 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6789 if (ret)
6790 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
6791 pcie_dev->rc_idx, ret);
6792
6793 mutex_unlock(&pcie_dev->recovery_lock);
6794}
6795DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6796 msm_pcie_fixup_resume_early);
6797
/*
 * msm_pcie_pm_control - exported PM entry point for PCIe client drivers.
 * @pm_opt:  requested operation (suspend/resume/disable-pc/enable-pc)
 * @busnr:   expected bus number of the endpoint, 0 to skip the check
 * @user:    the client's struct pci_dev (mandatory)
 * @data:    opaque client data forwarded to the suspend/resume helpers
 * @options: MSM_PCIE_CONFIG_* flags forwarded to the helpers
 *
 * Validates the endpoint against the RC's device table, then dispatches:
 * SUSPEND/RESUME take the recovery lock around msm_pcie_pm_suspend()/
 * msm_pcie_pm_resume(); DISABLE_PC/ENABLE_PC toggle the disable_pc flag
 * under cfg_lock.
 *
 * Return: 0 on success, -ENODEV/-EPROBE_DEFER/MSM_PCIE_ERROR on failure.
 */
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
			void *data, u32 options)
{
	int i, ret = 0;
	struct pci_dev *dev;
	u32 rc_idx = 0;
	struct msm_pcie_dev_t *pcie_dev;

	PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
		pm_opt, busnr, options);


	if (!user) {
		pr_err("PCIe: endpoint device is NULL\n");
		ret = -ENODEV;
		goto out;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);

	if (pcie_dev) {
		rc_idx = pcie_dev->rc_idx;
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
			rc_idx, pm_opt, busnr, options);
	} else {
		pr_err(
			"PCIe: did not find RC for pci endpoint device.\n"
			);
		ret = -ENODEV;
		goto out;
	}

	/* Cross-check busnr against the table entry for this endpoint
	 * (bdf's top byte is the bus number); busnr==0 skips the check.
	 */
	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (!busnr)
			break;
		if (user == pcie_dev->pcidev_table[i].dev) {
			if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
				break;

			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: bus number %d does not match with the expected value %d\n",
				pcie_dev->rc_idx, busnr,
				pcie_dev->pcidev_table[i].bdf >> 24);
			ret = MSM_PCIE_ERROR;
			goto out;
		}
	}

	if (i == MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: endpoint device was not found in device table",
			pcie_dev->rc_idx);
		ret = MSM_PCIE_ERROR;
		goto out;
	}

	dev = msm_pcie_dev[rc_idx].dev;

	if (!msm_pcie_dev[rc_idx].drv_ready) {
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"RC%d has not been successfully probed yet\n",
			rc_idx);
		return -EPROBE_DEFER;
	}

	switch (pm_opt) {
	case MSM_PCIE_SUSPEND:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to suspend the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);

		if (!msm_pcie_dev[rc_idx].power_on) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);
			break;
		}

		if (msm_pcie_dev[rc_idx].pending_ep_reg) {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: request to suspend the link is rejected\n",
				rc_idx);
			break;
		}

		/* Multi-EP RCs stay up until the last active EP suspends. */
		if (pcie_dev->num_active_ep) {
			PCIE_DBG(pcie_dev,
				"RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
				pcie_dev->rc_idx, pcie_dev->num_active_ep);
			return ret;
		}

		msm_pcie_dev[rc_idx].user_suspend = true;

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);

		ret = msm_pcie_pm_suspend(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to suspend the link.\n",
				rc_idx);
			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
		break;
	case MSM_PCIE_RESUME:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to resume the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status !=
					MSM_PCIE_LINK_DISABLED) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status,
				msm_pcie_dev[rc_idx].num_active_ep);
			break;
		}

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
		ret = msm_pcie_pm_resume(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to resume the link.\n",
				rc_idx);
		} else {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user succeeded to resume the link.\n",
				rc_idx);

			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);

		break;
	case MSM_PCIE_DISABLE_PC:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to keep the link always alive.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		if (msm_pcie_dev[rc_idx].suspending) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d Link has been suspended before request\n",
				rc_idx);
			ret = MSM_PCIE_ERROR;
		} else {
			/* Checked by msm_pcie_fixup_suspend(). */
			msm_pcie_dev[rc_idx].disable_pc = true;
		}
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	case MSM_PCIE_ENABLE_PC:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d cancels the request of alive link.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		msm_pcie_dev[rc_idx].disable_pc = false;
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	default:
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"PCIe: RC%d: unsupported pm operation:%d.\n",
			rc_idx, pm_opt);
		ret = -ENODEV;
		goto out;
	}

out:
	return ret;
}
EXPORT_SYMBOL(msm_pcie_pm_control);
6976
6977int msm_pcie_register_event(struct msm_pcie_register_event *reg)
6978{
6979 int i, ret = 0;
6980 struct msm_pcie_dev_t *pcie_dev;
6981
6982 if (!reg) {
6983 pr_err("PCIe: Event registration is NULL\n");
6984 return -ENODEV;
6985 }
6986
6987 if (!reg->user) {
6988 pr_err("PCIe: User of event registration is NULL\n");
6989 return -ENODEV;
6990 }
6991
6992 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6993
6994 if (!pcie_dev) {
6995 PCIE_ERR(pcie_dev, "%s",
6996 "PCIe: did not find RC for pci endpoint device.\n");
6997 return -ENODEV;
6998 }
6999
7000 if (pcie_dev->num_ep > 1) {
7001 for (i = 0; i < MAX_DEVICE_NUM; i++) {
7002 if (reg->user ==
7003 pcie_dev->pcidev_table[i].dev) {
7004 pcie_dev->event_reg =
7005 pcie_dev->pcidev_table[i].event_reg;
7006
7007 if (!pcie_dev->event_reg) {
7008 pcie_dev->pcidev_table[i].registered =
7009 true;
7010
7011 pcie_dev->num_active_ep++;
7012 PCIE_DBG(pcie_dev,
7013 "PCIe: RC%d: number of active EP(s): %d.\n",
7014 pcie_dev->rc_idx,
7015 pcie_dev->num_active_ep);
7016 }
7017
7018 pcie_dev->event_reg = reg;
7019 pcie_dev->pcidev_table[i].event_reg = reg;
7020 PCIE_DBG(pcie_dev,
7021 "Event 0x%x is registered for RC %d\n",
7022 reg->events,
7023 pcie_dev->rc_idx);
7024
7025 break;
7026 }
7027 }
7028
7029 if (pcie_dev->pending_ep_reg) {
7030 for (i = 0; i < MAX_DEVICE_NUM; i++)
7031 if (!pcie_dev->pcidev_table[i].registered)
7032 break;
7033
7034 if (i == MAX_DEVICE_NUM)
7035 pcie_dev->pending_ep_reg = false;
7036 }
7037 } else {
7038 pcie_dev->event_reg = reg;
7039 PCIE_DBG(pcie_dev,
7040 "Event 0x%x is registered for RC %d\n", reg->events,
7041 pcie_dev->rc_idx);
7042 }
7043
7044 return ret;
7045}
7046EXPORT_SYMBOL(msm_pcie_register_event);
7047
7048int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
7049{
7050 int i, ret = 0;
7051 struct msm_pcie_dev_t *pcie_dev;
7052
7053 if (!reg) {
7054 pr_err("PCIe: Event deregistration is NULL\n");
7055 return -ENODEV;
7056 }
7057
7058 if (!reg->user) {
7059 pr_err("PCIe: User of event deregistration is NULL\n");
7060 return -ENODEV;
7061 }
7062
7063 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
7064
7065 if (!pcie_dev) {
7066 PCIE_ERR(pcie_dev, "%s",
7067 "PCIe: did not find RC for pci endpoint device.\n");
7068 return -ENODEV;
7069 }
7070
7071 if (pcie_dev->num_ep > 1) {
7072 for (i = 0; i < MAX_DEVICE_NUM; i++) {
7073 if (reg->user == pcie_dev->pcidev_table[i].dev) {
7074 if (pcie_dev->pcidev_table[i].event_reg) {
7075 pcie_dev->num_active_ep--;
7076 PCIE_DBG(pcie_dev,
7077 "PCIe: RC%d: number of active EP(s) left: %d.\n",
7078 pcie_dev->rc_idx,
7079 pcie_dev->num_active_ep);
7080 }
7081
7082 pcie_dev->event_reg = NULL;
7083 pcie_dev->pcidev_table[i].event_reg = NULL;
7084 PCIE_DBG(pcie_dev,
7085 "Event is deregistered for RC %d\n",
7086 pcie_dev->rc_idx);
7087
7088 break;
7089 }
7090 }
7091 } else {
7092 pcie_dev->event_reg = NULL;
7093 PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
7094 pcie_dev->rc_idx);
7095 }
7096
7097 return ret;
7098}
7099EXPORT_SYMBOL(msm_pcie_deregister_event);
7100
/*
 * msm_pcie_recover_config - rewrite RC and EP config space from shadow.
 * @dev: the endpoint pci_dev on the link to recover.
 *
 * Requires the link to be up (confirmed via msm_pcie_confirm_linkup).
 * Shadowing is disabled while the recovery writes happen so the writes
 * themselves are not re-captured, then re-enabled after the PCI core's
 * saved state has been refreshed with pci_save_state().
 *
 * Return: 0 on success, -ENODEV if the input is NULL or the link is down.
 */
int msm_pcie_recover_config(struct pci_dev *dev)
{
	int ret = 0;
	struct msm_pcie_dev_t *pcie_dev;

	if (dev) {
		pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
		PCIE_DBG(pcie_dev,
			"Recovery for the link of RC%d\n", pcie_dev->rc_idx);
	} else {
		pr_err("PCIe: the input pci dev is NULL.\n");
		return -ENODEV;
	}

	if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
		PCIE_DBG(pcie_dev,
			"Recover config space of RC%d and its EP\n",
			pcie_dev->rc_idx);
		/* Suspend shadow capture while replaying the shadow. */
		pcie_dev->shadow_en = false;
		PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
		msm_pcie_cfg_recover(pcie_dev, true);
		PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
		msm_pcie_cfg_recover(pcie_dev, false);
		PCIE_DBG(pcie_dev,
			"Refreshing the saved config space in PCI framework for RC%d and its EP\n",
			pcie_dev->rc_idx);
		pci_save_state(pcie_dev->dev);
		pci_save_state(dev);
		pcie_dev->shadow_en = true;
		PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
			pcie_dev->rc_idx);
	} else {
		PCIE_ERR(pcie_dev,
			"PCIe: the link of RC%d is not up yet; can't recover config space.\n",
			pcie_dev->rc_idx);
		ret = -ENODEV;
	}

	return ret;
}
EXPORT_SYMBOL(msm_pcie_recover_config);
7142
7143int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
7144{
7145 int ret = 0;
7146 struct msm_pcie_dev_t *pcie_dev;
7147
7148 if (dev) {
7149 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
7150 PCIE_DBG(pcie_dev,
7151 "User requests to %s shadow\n",
7152 enable ? "enable" : "disable");
7153 } else {
7154 pr_err("PCIe: the input pci dev is NULL.\n");
7155 return -ENODEV;
7156 }
7157
7158 PCIE_DBG(pcie_dev,
7159 "The shadowing of RC%d is %s enabled currently.\n",
7160 pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not");
7161
7162 pcie_dev->shadow_en = enable;
7163
7164 PCIE_DBG(pcie_dev,
7165 "Shadowing of RC%d is turned %s upon user's request.\n",
7166 pcie_dev->rc_idx, enable ? "on" : "off");
7167
7168 return ret;
7169}
7170EXPORT_SYMBOL(msm_pcie_shadow_control);