blob: a42ff2c4a5bbd3b74f445b5157ce60771dcda180 [file] [log] [blame]
Tony Truonge023d012017-11-10 13:36:26 -08001/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
Tony Truong349ee492014-10-01 17:35:56 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * MSM PCIe controller driver.
15 */
16
17#include <linux/module.h>
18#include <linux/bitops.h>
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
Tony Truong74ee0fd2017-10-06 19:37:43 -070022#include <linux/jiffies.h>
Tony Truong349ee492014-10-01 17:35:56 -070023#include <linux/gpio.h>
24#include <linux/iopoll.h>
25#include <linux/kernel.h>
26#include <linux/of_pci.h>
27#include <linux/pci.h>
Tony Truong52122a62017-03-23 18:00:34 -070028#include <linux/iommu.h>
Tony Truong349ee492014-10-01 17:35:56 -070029#include <linux/platform_device.h>
30#include <linux/regulator/consumer.h>
Tony Truongb213ac12017-04-05 15:21:20 -070031#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
Tony Truong349ee492014-10-01 17:35:56 -070032#include <linux/slab.h>
33#include <linux/types.h>
34#include <linux/of_gpio.h>
Tony Truongb213ac12017-04-05 15:21:20 -070035#include <linux/clk/qcom.h>
Tony Truong349ee492014-10-01 17:35:56 -070036#include <linux/reset.h>
37#include <linux/msm-bus.h>
38#include <linux/msm-bus-board.h>
39#include <linux/debugfs.h>
40#include <linux/uaccess.h>
41#include <linux/io.h>
42#include <linux/msi.h>
43#include <linux/interrupt.h>
44#include <linux/irq.h>
45#include <linux/irqdomain.h>
46#include <linux/pm_wakeup.h>
47#include <linux/compiler.h>
48#include <soc/qcom/scm.h>
49#include <linux/ipc_logging.h>
50#include <linux/msm_pcie.h>
51
/* PCIe RC vendor/device IDs */
#define PCIE_VENDOR_ID_RCP		0x17cb
#define PCIE_DEVICE_ID_RCP		0x0106

#define PCIE20_L1SUB_CONTROL1		0x1E4
#define PCIE20_PARF_DBI_BASE_ADDR	0x350
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE	0x358

#define PCS_BASE			0x800

/*
 * Per-port PCS register window.  The macro argument is fully
 * parenthesized so expressions such as PCS_PORT(idx + 1) expand
 * correctly.
 */
#define PCS_PORT(n)			(PCS_BASE + (n) * 0x1000)

#define PCIE_N_SW_RESET(n)		(PCS_PORT(n) + 0x00)
#define PCIE_N_POWER_DOWN_CONTROL(n)	(PCS_PORT(n) + 0x04)
#define PCIE_N_PCS_STATUS(n)		(PCS_PORT(n) + 0x174)

#define PCIE_GEN3_COM_INTEGLOOP_GAIN1_MODE0	0x0154
#define PCIE_GEN3_L0_DRVR_CTRL0		0x080c
#define PCIE_GEN3_L0_RESET_GEN		0x0890
#define PCIE_GEN3_L0_BIST_ERR_CNT1_STATUS	0x08a8
#define PCIE_GEN3_L0_BIST_ERR_CNT2_STATUS	0x08ac
#define PCIE_GEN3_L0_DEBUG_BUS_STATUS4	0x08bc
#define PCIE_GEN3_PCIE_PHY_PCS_STATUS	0x1aac

/* PARF (PCIe wrapper) register offsets */
#define PCIE20_PARF_SYS_CTRL		0x00
#define PCIE20_PARF_PM_CTRL		0x20
#define PCIE20_PARF_PM_STTS		0x24
#define PCIE20_PARF_PCS_DEEMPH		0x34
#define PCIE20_PARF_PCS_SWING		0x38
#define PCIE20_PARF_PHY_CTRL		0x40
#define PCIE20_PARF_PHY_REFCLK		0x4C
#define PCIE20_PARF_CONFIG_BITS		0x50
#define PCIE20_PARF_TEST_BUS		0xE4
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x1A8
#define PCIE20_PARF_LTSSM		0x1B0
#define PCIE20_PARF_INT_ALL_STATUS	0x224
#define PCIE20_PARF_INT_ALL_CLEAR	0x228
#define PCIE20_PARF_INT_ALL_MASK	0x22C
#define PCIE20_PARF_SID_OFFSET		0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG	0x24C
#define PCIE20_PARF_BDF_TRANSLATE_N	0x250
#define PCIE20_PARF_DEVICE_TYPE		0x1000

/* ELBI register offsets */
#define PCIE20_ELBI_VERSION		0x00
#define PCIE20_ELBI_SYS_CTRL		0x04
#define PCIE20_ELBI_SYS_STTS		0x08

/* PCIe capability structure lives at 0x70 in the RC's config space */
#define PCIE20_CAP			0x70
#define PCIE20_CAP_DEVCTRLSTATUS	(PCIE20_CAP + 0x08)
#define PCIE20_CAP_LINKCTRLSTATUS	(PCIE20_CAP + 0x10)

/* standard config-space header offsets */
#define PCIE20_COMMAND_STATUS		0x04
#define PCIE20_HEADER_TYPE		0x0C
#define PCIE20_BUSNUMBERS		0x18
#define PCIE20_MEMORY_BASE_LIMIT	0x20
#define PCIE20_BRIDGE_CTRL		0x3C
#define PCIE20_DEVICE_CONTROL_STATUS	0x78
#define PCIE20_DEVICE_CONTROL2_STATUS2	0x98

#define PCIE20_AUX_CLK_FREQ_REG		0xB40
#define PCIE20_ACK_F_ASPM_CTRL_REG	0x70C
#define PCIE20_ACK_N_FTS		0xff00

/* DesignWare iATU viewport-style register offsets */
#define PCIE20_PLR_IATU_VIEWPORT	0x900
#define PCIE20_PLR_IATU_CTRL1		0x904
#define PCIE20_PLR_IATU_CTRL2		0x908
#define PCIE20_PLR_IATU_LBAR		0x90C
#define PCIE20_PLR_IATU_UBAR		0x910
#define PCIE20_PLR_IATU_LAR		0x914
#define PCIE20_PLR_IATU_LTAR		0x918
#define PCIE20_PLR_IATU_UTAR		0x91c

/*
 * Unrolled iATU register space: one 0x200-byte region per region index.
 * Argument parenthesized so PCIE_IATU_BASE(i + 1) expands correctly.
 */
#define PCIE_IATU_BASE(n)		((n) * 0x200)

#define PCIE_IATU_CTRL1(n)		(PCIE_IATU_BASE(n) + 0x00)
#define PCIE_IATU_CTRL2(n)		(PCIE_IATU_BASE(n) + 0x04)
#define PCIE_IATU_LBAR(n)		(PCIE_IATU_BASE(n) + 0x08)
#define PCIE_IATU_UBAR(n)		(PCIE_IATU_BASE(n) + 0x0c)
#define PCIE_IATU_LAR(n)		(PCIE_IATU_BASE(n) + 0x10)
#define PCIE_IATU_LTAR(n)		(PCIE_IATU_BASE(n) + 0x14)
#define PCIE_IATU_UTAR(n)		(PCIE_IATU_BASE(n) + 0x18)

#define PCIE20_PORT_LINK_CTRL_REG	0x710
#define PCIE20_GEN3_RELATED_REG		0x890
#define PCIE20_PIPE_LOOPBACK_CONTROL	0x8b8
#define LOOPBACK_BASE_ADDR_OFFSET	0x8000

#define PCIE20_CTRL1_TYPE_CFG0		0x04
#define PCIE20_CTRL1_TYPE_CFG1		0x05

#define PCIE20_CAP_ID			0x10
#define L1SUB_CAP_ID			0x1E

#define PCIE_CAP_PTR_OFFSET		0x34
#define PCIE_EXT_CAP_OFFSET		0x100

/* AER extended-capability register offsets */
#define PCIE20_AER_UNCORR_ERR_STATUS_REG	0x104
#define PCIE20_AER_CORR_ERR_STATUS_REG		0x110
#define PCIE20_AER_ROOT_ERR_STATUS_REG		0x130
#define PCIE20_AER_ERR_SRC_ID_REG		0x134

#define RD 0
#define WR 1
#define MSM_PCIE_ERROR -1

/* delay/timeout bounds (microseconds unless noted) */
#define PERST_PROPAGATION_DELAY_US_MIN	      1000
#define PERST_PROPAGATION_DELAY_US_MAX	      1005
#define SWITCH_DELAY_MAX	      20
#define REFCLK_STABILIZATION_DELAY_US_MIN     1000
#define REFCLK_STABILIZATION_DELAY_US_MAX     1005
#define LINK_UP_TIMEOUT_US_MIN		      5000
#define LINK_UP_TIMEOUT_US_MAX		      5100
#define LINK_UP_CHECK_MAX_COUNT		      20
#define EP_UP_TIMEOUT_US_MIN	      1000
#define EP_UP_TIMEOUT_US_MAX	      1005
#define EP_UP_TIMEOUT_US	      1000000
#define PHY_STABILIZATION_DELAY_US_MIN	      995
#define PHY_STABILIZATION_DELAY_US_MAX	      1005
#define POWER_DOWN_DELAY_US_MIN		10
#define POWER_DOWN_DELAY_US_MAX		11
#define LINKDOWN_INIT_WAITING_US_MIN    995
#define LINKDOWN_INIT_WAITING_US_MAX    1005
#define LINKDOWN_WAITING_US_MIN	        4900
#define LINKDOWN_WAITING_US_MAX	        5100
#define LINKDOWN_WAITING_COUNT	        200

/* link speed (GEN) encodings used in target-link-speed fields */
#define GEN1_SPEED 0x1
#define GEN2_SPEED 0x2
#define GEN3_SPEED 0x3

#define PHY_READY_TIMEOUT_COUNT		   10
#define XMLH_LINK_UP			   0x400
#define MAX_LINK_RETRIES 5
#define MAX_BUS_NUM 3
#define MAX_PROP_SIZE 32
#define MAX_RC_NAME_LEN 15
#define MSM_PCIE_MAX_VREG 4
#define MSM_PCIE_MAX_CLK 12
#define MSM_PCIE_MAX_PIPE_CLK 1
#define MAX_RC_NUM 3
#define MAX_DEVICE_NUM 20
#define MAX_SHORT_BDF_NUM 16
#define PCIE_TLP_RD_SIZE 0x5
#define PCIE_MSI_NR_IRQS 256
#define MSM_PCIE_MAX_MSI 32
#define MAX_MSG_LEN 80
#define PCIE_LOG_PAGES (50)
#define PCIE_CONF_SPACE_DW 1024
#define PCIE_CLEAR 0xDEADBEEF
/* all-ones config read: the device behind the link did not respond */
#define PCIE_LINK_DOWN 0xFFFFFFFF

#define MSM_PCIE_MAX_RESET 5
#define MSM_PCIE_MAX_PIPE_RESET 1

/* MSI target address programmed into endpoints */
#define MSM_PCIE_MSI_PHY 0xa0000000
#define PCIE20_MSI_CTRL_ADDR		(0x820)
#define PCIE20_MSI_CTRL_UPPER_ADDR	(0x824)
#define PCIE20_MSI_CTRL_INTR_EN	        (0x828)
#define PCIE20_MSI_CTRL_INTR_MASK       (0x82C)
#define PCIE20_MSI_CTRL_INTR_STATUS     (0x830)
#define PCIE20_MSI_CTRL_MAX 8

/* PM control options */
#define PM_IRQ                   0x1
#define PM_CLK                   0x2
#define PM_GPIO                  0x4
#define PM_VREG                  0x8
#define PM_PIPE_CLK              0x10
#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
#else
#define PCIE_UPPER_ADDR(addr) (0x0)
#endif
#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
228
/*
 * Recover the controller struct stashed in bus->sysdata.  Argument and
 * whole expansion are parenthesized so the macro composes safely in any
 * expression context.
 */
#define PCIE_BUS_PRIV_DATA(bus) \
	((struct msm_pcie_dev_t *)((bus)->sysdata))

/* Config Space Offsets */
/*
 * Encode bus/devfn into the upper bits of a config-space address.
 * Arguments are parenthesized: BDF_OFFSET(b | 1, d) previously expanded
 * to (b | (1 << 24)) because | binds looser than <<.
 */
#define BDF_OFFSET(bus, devfn) \
	(((bus) << 24) | ((devfn) << 16))

/* raw printk-style debug, gated by the debug_mask module parameter */
#define PCIE_GEN_DBG(x...) do { \
	if (msm_pcie_debug_mask) \
		pr_alert(x); \
	} while (0)

/* verbose debug: long+short IPC logs, and console when debug_mask is set */
#define PCIE_DBG(dev, fmt, arg...) do {			 \
	if ((dev) && (dev)->ipc_log_long)   \
		ipc_log_string((dev)->ipc_log_long, \
			"DBG1:%s: " fmt, __func__, arg); \
	if ((dev) && (dev)->ipc_log)   \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
	if (msm_pcie_debug_mask)   \
		pr_alert("%s: " fmt, __func__, arg);		  \
	} while (0)

/* second-level debug: short IPC log only */
#define PCIE_DBG2(dev, fmt, arg...) do {			 \
	if ((dev) && (dev)->ipc_log)   \
		ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
	if (msm_pcie_debug_mask)   \
		pr_alert("%s: " fmt, __func__, arg);              \
	} while (0)

/* third-level debug: short IPC log only */
#define PCIE_DBG3(dev, fmt, arg...) do {			 \
	if ((dev) && (dev)->ipc_log)   \
		ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
	if (msm_pcie_debug_mask)   \
		pr_alert("%s: " fmt, __func__, arg);              \
	} while (0)

/* register-dump output: goes only to the dump IPC log */
#define PCIE_DUMP(dev, fmt, arg...) do {			\
	if ((dev) && (dev)->ipc_log_dump) \
		ipc_log_string((dev)->ipc_log_dump, \
			"DUMP:%s: " fmt, __func__, arg); \
	} while (0)

/* debugfs-triggered output: dump IPC log plus unconditional console */
#define PCIE_DBG_FS(dev, fmt, arg...) do {			\
	if ((dev) && (dev)->ipc_log_dump) \
		ipc_log_string((dev)->ipc_log_dump, \
			"DBG_FS:%s: " fmt, __func__, arg); \
	pr_alert("%s: " fmt, __func__, arg); \
	} while (0)

/* informational: both IPC logs plus pr_info */
#define PCIE_INFO(dev, fmt, arg...) do {			 \
	if ((dev) && (dev)->ipc_log_long)   \
		ipc_log_string((dev)->ipc_log_long, \
			"INFO:%s: " fmt, __func__, arg); \
	if ((dev) && (dev)->ipc_log)   \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
	pr_info("%s: " fmt, __func__, arg);  \
	} while (0)

/* error: both IPC logs plus pr_err */
#define PCIE_ERR(dev, fmt, arg...) do {			 \
	if ((dev) && (dev)->ipc_log_long)   \
		ipc_log_string((dev)->ipc_log_long, \
			"ERR:%s: " fmt, __func__, arg); \
	if ((dev) && (dev)->ipc_log)   \
		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
	pr_err("%s: " fmt, __func__, arg);  \
	} while (0)

295
296
/*
 * Indices into msm_pcie_dev_t.res[]; each entry corresponds, in order, to
 * a named platform resource in msm_pcie_res_info ("parf", "phy", ...).
 */
enum msm_pcie_res {
	MSM_PCIE_RES_PARF,	/* "parf" wrapper register block */
	MSM_PCIE_RES_PHY,	/* "phy" register block */
	MSM_PCIE_RES_DM_CORE,	/* "dm_core": RC's own config/DBI space */
	MSM_PCIE_RES_ELBI,	/* "elbi" register block */
	MSM_PCIE_RES_IATU,	/* "iatu" register block */
	MSM_PCIE_RES_CONF,	/* "conf": endpoint config-space window */
	MSM_PCIE_RES_IO,	/* "io" window */
	MSM_PCIE_RES_BARS,	/* "bars" memory window */
	MSM_PCIE_RES_TCSR,	/* "tcsr" register block */
	MSM_PCIE_MAX_RES,	/* number of resources */
};
309
/*
 * Indices into msm_pcie_dev_t.irq[]; each entry corresponds, in order, to
 * a named interrupt in msm_pcie_irq_info ("int_msi", "int_a", ...).
 */
enum msm_pcie_irq {
	MSM_PCIE_INT_MSI,		/* "int_msi" */
	MSM_PCIE_INT_A,			/* "int_a" (legacy INTA) */
	MSM_PCIE_INT_B,			/* "int_b" */
	MSM_PCIE_INT_C,			/* "int_c" */
	MSM_PCIE_INT_D,			/* "int_d" */
	MSM_PCIE_INT_PLS_PME,		/* "int_pls_pme" */
	MSM_PCIE_INT_PME_LEGACY,	/* "int_pme_legacy" */
	MSM_PCIE_INT_PLS_ERR,		/* "int_pls_err" */
	MSM_PCIE_INT_AER_LEGACY,	/* "int_aer_legacy" */
	MSM_PCIE_INT_LINK_UP,		/* "int_pls_link_up" */
	MSM_PCIE_INT_LINK_DOWN,		/* "int_pls_link_down" */
	MSM_PCIE_INT_BRIDGE_FLUSH_N,	/* "int_bridge_flush_n" */
	MSM_PCIE_INT_GLOBAL_INT,	/* "int_global_int" */
	MSM_PCIE_MAX_IRQ,		/* number of interrupts */
};
326
/*
 * Event numbers reported through the aggregated "global" interrupt.
 * Values are fixed (start at 1) — presumably they mirror bit positions in
 * the PARF INT_ALL status/mask registers; verify against hardware docs.
 */
enum msm_pcie_irq_event {
	MSM_PCIE_INT_EVT_LINK_DOWN = 1,
	MSM_PCIE_INT_EVT_BME,		/* bus-master enable */
	MSM_PCIE_INT_EVT_PM_TURNOFF,
	MSM_PCIE_INT_EVT_DEBUG,
	MSM_PCIE_INT_EVT_LTR,
	MSM_PCIE_INT_EVT_MHI_Q6,
	MSM_PCIE_INT_EVT_MHI_A7,
	MSM_PCIE_INT_EVT_DSTATE_CHANGE,
	MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
	MSM_PCIE_INT_EVT_MMIO_WRITE,
	MSM_PCIE_INT_EVT_CFG_WRITE,
	MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
	MSM_PCIE_INT_EVT_LINK_UP,
	MSM_PCIE_INT_EVT_AER_LEGACY,
	MSM_PCIE_INT_EVT_AER_ERR,
	MSM_PCIE_INT_EVT_PME_LEGACY,
	MSM_PCIE_INT_EVT_PLS_PME,
	MSM_PCIE_INT_EVT_INTD,
	MSM_PCIE_INT_EVT_INTC,
	MSM_PCIE_INT_EVT_INTB,
	MSM_PCIE_INT_EVT_INTA,
	MSM_PCIE_INT_EVT_EDMA,
	MSM_PCIE_INT_EVT_MSI_0,
	MSM_PCIE_INT_EVT_MSI_1,
	MSM_PCIE_INT_EVT_MSI_2,
	MSM_PCIE_INT_EVT_MSI_3,
	MSM_PCIE_INT_EVT_MSI_4,
	MSM_PCIE_INT_EVT_MSI_5,
	MSM_PCIE_INT_EVT_MSI_6,
	MSM_PCIE_INT_EVT_MSI_7,
	MSM_PCIE_INT_EVT_MAX = 30,	/* one past MSI_7 (29) */
};
360
/* Indices into msm_pcie_dev_t.gpio[]; order matches msm_pcie_gpio_info. */
enum msm_pcie_gpio {
	MSM_PCIE_GPIO_PERST,	/* "perst-gpio": endpoint reset */
	MSM_PCIE_GPIO_WAKE,	/* "wake-gpio": wake input from endpoint */
	MSM_PCIE_GPIO_EP,	/* "qcom,ep-gpio" */
	MSM_PCIE_MAX_GPIO
};
367
/* Software view of the link state, kept in msm_pcie_dev_t.link_status. */
enum msm_pcie_link_status {
	MSM_PCIE_LINK_DEINIT,	/* link never brought up */
	MSM_PCIE_LINK_ENABLED,	/* link trained and usable */
	MSM_PCIE_LINK_DISABLED	/* link intentionally taken down */
};
373
/* Bit flags for msm_pcie_dev_t.boot_option: suppress enumeration triggers. */
enum msm_pcie_boot_option {
	MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0),	/* skip enumeration at probe */
	MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)	/* skip enumeration on wake */
};
378
/* gpio info structure */
struct msm_pcie_gpio_info_t {
	char *name;		/* DT property name, e.g. "perst-gpio" */
	uint32_t num;		/* GPIO number (filled in at probe) */
	bool out;		/* true: configured as output */
	uint32_t on;		/* value driven when asserted */
	uint32_t init;		/* initial value */
	bool required;		/* probe fails if a required GPIO is missing */
};
388
/* voltage regulator info structure */
struct msm_pcie_vreg_info_t {
	struct regulator *hdl;	/* handle acquired at probe; NULL until then */
	char *name;		/* supply name, e.g. "vreg-3.3" */
	uint32_t max_v;		/* maximum voltage (presumably uV — verify) */
	uint32_t min_v;		/* minimum voltage */
	uint32_t opt_mode;	/* load for regulator_set_load, 0 = unused */
	bool required;		/* probe fails if a required supply is missing */
};
398
/* reset info structure */
struct msm_pcie_reset_info_t {
	struct reset_control *hdl;	/* handle acquired at probe */
	char *name;			/* reset name from DT */
	bool required;			/* probe fails if missing when true */
};
405
/* clock info structure */
struct msm_pcie_clk_info_t {
	struct clk *hdl;	/* handle acquired at probe */
	char *name;		/* clock name from DT */
	u32 freq;		/* rate to set in Hz; 0 = leave unchanged */
	bool config_mem;	/* apply NORETAIN mem/periph flags
				 * (see msm_pcie_config_clock_mem) */
	bool required;		/* probe fails if missing when true */
};
414
/* resource info structure */
struct msm_pcie_res_info_t {
	char *name;			/* platform resource name */
	struct resource *resource;	/* resource found at probe */
	void __iomem *base;		/* ioremapped base address */
};
421
/* irq info structure */
struct msm_pcie_irq_info_t {
	char *name;	/* interrupt name from DT */
	uint32_t num;	/* Linux IRQ number (filled in at probe) */
};
427
/* phy info structure: one step of the PHY init sequence (see pcie_phy_init) */
struct msm_pcie_phy_info_t {
	u32 offset;	/* register offset from the PHY base */
	u32 val;	/* value to write */
	u32 delay;	/* post-write delay in us; 0 = none */
};
434
/* PCIe device info structure: per-endpoint bookkeeping */
struct msm_pcie_device_info {
	u32 bdf;			/* bus/devfn encoding (see BDF_OFFSET) */
	struct pci_dev *dev;		/* associated pci_dev, if enumerated */
	short short_bdf;		/* compact BDF index */
	u32 sid;			/* SMMU stream ID */
	int domain;			/* PCI domain number */
	void __iomem *conf_base;	/* mapped config space of the device */
	unsigned long phy_address;	/* physical config-space address */
	u32 dev_ctrlstts_offset;	/* offset of Device Control/Status */
	struct msm_pcie_register_event *event_reg;	/* client callbacks */
	bool registered;		/* client registration state */
};
448
/* msm pcie device structure: one instance per root complex (RC) */
struct msm_pcie_dev_t {
	struct platform_device *pdev;
	struct pci_dev *dev;
	struct regulator *gdsc;
	struct regulator *gdsc_smmu;

	/* per-RC supplies, GPIOs, clocks, resources, IRQs and resets;
	 * populated from the static tables below at probe time
	 */
	struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
	struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
	struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK];
	struct msm_pcie_clk_info_t pipeclk[MSM_PCIE_MAX_PIPE_CLK];
	struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
	struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
	struct msm_pcie_irq_info_t msi[MSM_PCIE_MAX_MSI];
	struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
	struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];

	/* ioremapped register bases, one per enum msm_pcie_res entry */
	void __iomem *parf;
	void __iomem *phy;
	void __iomem *elbi;
	void __iomem *iatu;
	void __iomem *dm_core;
	void __iomem *conf;
	void __iomem *bars;
	void __iomem *tcsr;

	uint32_t axi_bar_start;
	uint32_t axi_bar_end;

	struct resource *dev_mem_res;
	struct resource *dev_io_res;

	uint32_t wake_n;
	uint32_t vreg_n;
	uint32_t gpio_n;
	uint32_t parf_deemph;
	uint32_t parf_swing;

	/* config-space access gating and locking */
	bool cfg_access;
	spinlock_t cfg_lock;
	unsigned long irqsave_flags;
	struct mutex enumerate_lock;
	struct mutex setup_lock;

	/* MSI bookkeeping */
	struct irq_domain *irq_domain;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
	uint32_t msi_gicm_addr;
	uint32_t msi_gicm_base;
	bool use_msi;

	enum msm_pcie_link_status link_status;
	bool user_suspend;
	bool disable_pc;
	struct pci_saved_state *saved_state;

	struct wakeup_source ws;
	struct msm_bus_scale_pdata *bus_scale_table;
	uint32_t bus_client;

	/* capability/feature knobs, mostly populated from DT */
	bool l0s_supported;
	bool l1_supported;
	bool l1ss_supported;
	bool common_clk_en;
	bool clk_power_manage_en;
	bool aux_clk_sync;
	bool aer_enable;
	bool smmu_exist;
	uint32_t smmu_sid_base;
	uint32_t n_fts;
	uint32_t max_link_speed;	/* GEN1_SPEED/GEN2_SPEED/GEN3_SPEED */
	bool ext_ref_clk;
	uint32_t ep_latency;
	uint32_t switch_latency;
	uint32_t wr_halt_size;
	uint32_t slv_addr_space_size;
	uint32_t cpl_timeout;
	uint32_t current_bdf;
	uint32_t perst_delay_us_min;
	uint32_t perst_delay_us_max;
	uint32_t tlp_rd_size;
	bool linkdown_panic;
	uint32_t boot_option;		/* enum msm_pcie_boot_option bits */

	uint32_t rc_idx;		/* index of this RC (0..MAX_RC_NUM-1) */
	uint32_t phy_ver;
	bool drv_ready;
	bool enumerated;
	struct work_struct handle_wake_work;
	struct mutex recovery_lock;
	spinlock_t linkdown_lock;
	spinlock_t wakeup_lock;
	spinlock_t global_irq_lock;
	spinlock_t aer_lock;

	/* event/error statistics */
	ulong linkdown_counter;
	ulong link_turned_on_counter;
	ulong link_turned_off_counter;
	ulong rc_corr_counter;
	ulong rc_non_fatal_counter;
	ulong rc_fatal_counter;
	ulong ep_corr_counter;
	ulong ep_non_fatal_counter;
	ulong ep_fatal_counter;
	bool suspending;
	ulong wake_counter;
	u32 num_active_ep;
	u32 num_ep;
	bool pending_ep_reg;

	/* PHY init sequence loaded from DT (see pcie_phy_init) */
	u32 phy_len;
	struct msm_pcie_phy_info_t *phy_sequence;

	/* shadow copies of config space for restore after linkdown
	 * (see msm_pcie_cfg_recover)
	 */
	u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
	u32 rc_shadow[PCIE_CONF_SPACE_DW];
	bool shadow_en;
	bool bridge_found;
	struct msm_pcie_register_event *event_reg;
	unsigned int scm_dev_id;	/* SCM device id for sec-config restore */
	bool power_on;

	/* IPC log handles used by the PCIE_DBG*/PCIE_INFO/PCIE_ERR macros */
	void *ipc_log;
	void *ipc_log_long;
	void *ipc_log_dump;
	bool use_19p2mhz_aux_clk;
	bool use_pinctrl;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_sleep;
	struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM];
};
574
/* debug mask sys interface: non-zero routes PCIE_DBG* output to the console */
static int msm_pcie_debug_mask;
module_param_named(debug_mask, msm_pcie_debug_mask,
			    int, 0644);

/*
 * For each bit set, invert the default capability
 * option for the corresponding root complex
 * and its devices.
 */
static int msm_pcie_invert_l0s_support;
module_param_named(invert_l0s_support, msm_pcie_invert_l0s_support,
			    int, 0644);
static int msm_pcie_invert_l1_support;
module_param_named(invert_l1_support, msm_pcie_invert_l1_support,
			    int, 0644);
static int msm_pcie_invert_l1ss_support;
module_param_named(invert_l1ss_support, msm_pcie_invert_l1ss_support,
			    int, 0644);
static int msm_pcie_invert_aer_support;
module_param_named(invert_aer_support, msm_pcie_invert_aer_support,
			    int, 0644);

/*
 * For each bit set, keep the resources on when link training fails
 * or linkdown occurs for the corresponding root complex
 */
static int msm_pcie_keep_resources_on;
module_param_named(keep_resources_on, msm_pcie_keep_resources_on,
			    int, 0644);

/* debugfs values (RC selection and register write parameters) */
static u32 rc_sel;
static u32 base_sel;
static u32 wr_offset;
static u32 wr_mask;
static u32 wr_value;
static ulong corr_counter_limit = 5;
613
/* Table to track info of PCIe devices */
static struct msm_pcie_device_info
	msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];

/* PCIe driver state */
static struct pcie_drv_sta {
	u32 rc_num;		/* number of root complexes registered */
	struct mutex drv_lock;	/* guards this driver-wide state */
} pcie_drv;

/* msm pcie device data: one entry per root complex */
static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
626
/* regulators */
static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
	/* hdl, name, max_v, min_v, opt_mode, required */
	{NULL, "vreg-3.3", 0, 0, 0, false},
	{NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
	{NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
	{NULL, "vreg-cx", 0, 0, 0, false}
};

/* GPIOs */
static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
	/* name, num, out, on, init, required */
	{"perst-gpio", 0, 1, 0, 0, 1},
	{"wake-gpio", 0, 0, 0, 0, 0},
	{"qcom,ep-gpio", 0, 1, 1, 0, 0}
};
641
/* resets: per-RC reset lines ({hdl, name, required} per entry) */
static struct msm_pcie_reset_info_t
msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
	{
		{NULL, "pcie_0_core_reset", false},
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_0_phy_reset", false}
	},
	{
		{NULL, "pcie_1_core_reset", false},
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_1_phy_reset", false}
	},
	{
		{NULL, "pcie_2_core_reset", false},
		{NULL, "pcie_phy_reset", false},
		{NULL, "pcie_phy_com_reset", false},
		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
		{NULL, "pcie_2_phy_reset", false}
	}
};

/* pipe reset: one per RC */
static struct msm_pcie_reset_info_t
msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
	{
		{NULL, "pcie_0_phy_pipe_reset", false}
	},
	{
		{NULL, "pcie_1_phy_pipe_reset", false}
	},
	{
		{NULL, "pcie_2_phy_pipe_reset", false}
	}
};
681
/* clocks: per-RC clock list ({hdl, name, freq, config_mem, required}) */
static struct msm_pcie_clk_info_t
	msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
	{
	{NULL, "pcie_0_ref_clk_src", 0, false, false},
	{NULL, "pcie_0_aux_clk", 1010000, false, true},
	{NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_0_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_0_slv_axi_clk", 0, true, true},
	{NULL, "pcie_0_ldo", 0, false, true},
	{NULL, "pcie_0_smmu_clk", 0, false, false},
	{NULL, "pcie_0_slv_q2a_axi_clk", 0, false, false},
	{NULL, "pcie_phy_refgen_clk", 0, false, false},
	{NULL, "pcie_tbu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	},
	{
	{NULL, "pcie_1_ref_clk_src", 0, false, false},
	{NULL, "pcie_1_aux_clk", 1010000, false, true},
	{NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_1_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_1_slv_axi_clk", 0, true, true},
	{NULL, "pcie_1_ldo", 0, false, true},
	{NULL, "pcie_1_smmu_clk", 0, false, false},
	{NULL, "pcie_1_slv_q2a_axi_clk", 0, false, false},
	{NULL, "pcie_phy_refgen_clk", 0, false, false},
	{NULL, "pcie_tbu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	},
	{
	{NULL, "pcie_2_ref_clk_src", 0, false, false},
	{NULL, "pcie_2_aux_clk", 1010000, false, true},
	{NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
	{NULL, "pcie_2_mstr_axi_clk", 0, true, true},
	{NULL, "pcie_2_slv_axi_clk", 0, true, true},
	{NULL, "pcie_2_ldo", 0, false, true},
	{NULL, "pcie_2_smmu_clk", 0, false, false},
	{NULL, "pcie_2_slv_q2a_axi_clk", 0, false, false},
	{NULL, "pcie_phy_refgen_clk", 0, false, false},
	{NULL, "pcie_tbu_clk", 0, false, false},
	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
	{NULL, "pcie_phy_aux_clk", 0, false, false}
	}
};
728
/* Pipe Clocks: one per RC, rate fixed at 125 MHz, memory flags applied */
static struct msm_pcie_clk_info_t
	msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
	{
	{NULL, "pcie_0_pipe_clk", 125000000, true, true},
	},
	{
	{NULL, "pcie_1_pipe_clk", 125000000, true, true},
	},
	{
	{NULL, "pcie_2_pipe_clk", 125000000, true, true},
	}
};
742
/* resources: platform resource names, ordered to match enum msm_pcie_res */
static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
	{"parf", NULL, NULL},
	{"phy", NULL, NULL},
	{"dm_core", NULL, NULL},
	{"elbi", NULL, NULL},
	{"iatu", NULL, NULL},
	{"conf", NULL, NULL},
	{"io", NULL, NULL},
	{"bars", NULL, NULL},
	{"tcsr", NULL, NULL}
};
755
/* irqs: interrupt names, ordered to match enum msm_pcie_irq */
static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
	{"int_msi", 0},
	{"int_a", 0},
	{"int_b", 0},
	{"int_c", 0},
	{"int_d", 0},
	{"int_pls_pme", 0},
	{"int_pme_legacy", 0},
	{"int_pls_err", 0},
	{"int_aer_legacy", 0},
	{"int_pls_link_up", 0},
	{"int_pls_link_down", 0},
	{"int_bridge_flush_n", 0},
	{"int_global_int", 0}
};
772
/* MSIs: per-vector interrupt names ("msi_0" .. "msi_31") */
static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
	{"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
	{"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
	{"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
	{"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
	{"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
	{"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
	{"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
	{"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
};
784
Tony Truong7772e692017-04-13 17:03:34 -0700785static int msm_pcie_config_device(struct pci_dev *dev, void *pdev);
Tony Truongb1af8b62017-05-31 15:40:38 -0700786static void msm_pcie_config_link_pm_rc(struct msm_pcie_dev_t *dev,
787 struct pci_dev *pdev, bool enable);
Tony Truong7772e692017-04-13 17:03:34 -0700788
#ifdef CONFIG_ARM
/* Assign legacy INTx IRQs via the common swizzle + OF mapping (ARM only). */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
}
#else
/* No legacy IRQ fixup is required on non-ARM builds. */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
}
#endif
799
Stephen Boydb5b8fc32017-06-21 08:59:11 -0700800static inline void msm_pcie_write_reg(void __iomem *base, u32 offset, u32 value)
Tony Truong349ee492014-10-01 17:35:56 -0700801{
802 writel_relaxed(value, base + offset);
803 /* ensure that changes propagated to the hardware */
804 wmb();
805}
806
Stephen Boydb5b8fc32017-06-21 08:59:11 -0700807static inline void msm_pcie_write_reg_field(void __iomem *base, u32 offset,
Tony Truong349ee492014-10-01 17:35:56 -0700808 const u32 mask, u32 val)
809{
810 u32 shift = find_first_bit((void *)&mask, 32);
811 u32 tmp = readl_relaxed(base + offset);
812
813 tmp &= ~mask; /* clear written bits */
814 val = tmp | (val << shift);
815 writel_relaxed(val, base + offset);
816 /* ensure that changes propagated to the hardware */
817 wmb();
818}
819
Tony Truongb1af8b62017-05-31 15:40:38 -0700820static inline void msm_pcie_config_clear_set_dword(struct pci_dev *pdev,
821 int pos, u32 clear, u32 set)
822{
823 u32 val;
824
825 pci_read_config_dword(pdev, pos, &val);
826 val &= ~clear;
827 val |= set;
828 pci_write_config_dword(pdev, pos, val);
829}
830
Tony Truong349ee492014-10-01 17:35:56 -0700831static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
832 struct msm_pcie_clk_info_t *info)
833{
834 int ret;
835
836 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
837 if (ret)
838 PCIE_ERR(dev,
839 "PCIe: RC%d can't configure core memory for clk %s: %d.\n",
840 dev->rc_idx, info->name, ret);
841 else
842 PCIE_DBG2(dev,
843 "PCIe: RC%d configured core memory for clk %s.\n",
844 dev->rc_idx, info->name);
845
846 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
847 if (ret)
848 PCIE_ERR(dev,
849 "PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
850 dev->rc_idx, info->name, ret);
851 else
852 PCIE_DBG2(dev,
853 "PCIe: RC%d configured peripheral memory for clk %s.\n",
854 dev->rc_idx, info->name);
855}
856
Tony Truong349ee492014-10-01 17:35:56 -0700857static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
858{
859 int i, size;
Tony Truong349ee492014-10-01 17:35:56 -0700860
861 size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
862 for (i = 0; i < size; i += 32) {
863 PCIE_DUMP(dev,
864 "PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
865 dev->rc_idx, i,
866 readl_relaxed(dev->phy + i),
867 readl_relaxed(dev->phy + (i + 4)),
868 readl_relaxed(dev->phy + (i + 8)),
869 readl_relaxed(dev->phy + (i + 12)),
870 readl_relaxed(dev->phy + (i + 16)),
871 readl_relaxed(dev->phy + (i + 20)),
872 readl_relaxed(dev->phy + (i + 24)),
873 readl_relaxed(dev->phy + (i + 28)));
874 }
875}
876
Tony Truong349ee492014-10-01 17:35:56 -0700877static void pcie_phy_init(struct msm_pcie_dev_t *dev)
878{
879 int i;
880 struct msm_pcie_phy_info_t *phy_seq;
881
882 PCIE_DBG(dev,
883 "RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
884 dev->rc_idx);
885
886 if (dev->phy_sequence) {
887 i = dev->phy_len;
888 phy_seq = dev->phy_sequence;
889 while (i--) {
890 msm_pcie_write_reg(dev->phy,
891 phy_seq->offset,
892 phy_seq->val);
893 if (phy_seq->delay)
894 usleep_range(phy_seq->delay,
895 phy_seq->delay + 1);
896 phy_seq++;
897 }
Tony Truong349ee492014-10-01 17:35:56 -0700898 }
899}
900
Tony Truong349ee492014-10-01 17:35:56 -0700901static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
902{
Tony Truongc275fe02017-04-18 19:04:20 -0700903 u32 pos = (dev->max_link_speed == GEN2_SPEED) ?
904 PCIE_N_PCS_STATUS(dev->rc_idx) :
905 PCIE_GEN3_PCIE_PHY_PCS_STATUS;
906
907 if (readl_relaxed(dev->phy + pos) & BIT(6))
Tony Truong349ee492014-10-01 17:35:56 -0700908 return false;
909 else
910 return true;
911}
Tony Truong349ee492014-10-01 17:35:56 -0700912
913static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
914{
915 int ret, scm_ret;
916
917 if (!dev) {
918 pr_err("PCIe: the input pcie dev is NULL.\n");
919 return -ENODEV;
920 }
921
922 ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
923 if (ret || scm_ret) {
924 PCIE_ERR(dev,
925 "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
926 dev->rc_idx, ret, scm_ret);
927 return ret ? ret : -EINVAL;
928 }
929
930 return 0;
931}
932
933static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
934 u32 offset)
935{
936 if (offset % 4) {
937 PCIE_ERR(dev,
938 "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
939 dev->rc_idx, offset);
940 return MSM_PCIE_ERROR;
941 }
942
943 return 0;
944}
945
946static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
947 bool check_sw_stts,
948 bool check_ep,
949 void __iomem *ep_conf)
950{
951 u32 val;
952
953 if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
954 PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
955 dev->rc_idx);
956 return false;
957 }
958
959 if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
960 PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
961 dev->rc_idx);
962 return false;
963 }
964
965 val = readl_relaxed(dev->dm_core);
966 PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
967 dev->rc_idx, val);
968 if (val == PCIE_LINK_DOWN) {
969 PCIE_ERR(dev,
970 "PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
971 dev->rc_idx, dev->rc_idx, val);
972 return false;
973 }
974
975 if (check_ep) {
976 val = readl_relaxed(ep_conf);
977 PCIE_DBG(dev,
978 "PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
979 dev->rc_idx, val);
980 if (val == PCIE_LINK_DOWN) {
981 PCIE_ERR(dev,
982 "PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
983 dev->rc_idx, dev->rc_idx, val);
984 return false;
985 }
986 }
987
988 return true;
989}
990
/*
 * Restore shadowed PCIe config space after a link recovery.
 *
 * @rc: true  - restore only the RC's config space (from dev->rc_shadow);
 *      false - restore every registered EP's config space (from
 *              dev->ep_shadow[]), skipping EPs whose link does not
 *              confirm as up.
 *
 * Shadow entries equal to PCIE_CLEAR are unused and skipped.  Dwords are
 * written back in descending offset order (j counts down), and each write
 * is followed by a wmb() so it reaches the hardware before the next one.
 */
static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
{
	int i, j;
	u32 val = 0;
	u32 *shadow;
	void __iomem *cfg = dev->conf;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		/* EP table is packed: first zero BDF marks the end */
		if (!rc && !dev->pcidev_table[i].bdf)
			break;
		if (rc) {
			cfg = dev->dm_core;
			shadow = dev->rc_shadow;
		} else {
			/* only touch EPs whose link is actually up */
			if (!msm_pcie_confirm_linkup(dev, false, true,
				dev->pcidev_table[i].conf_base))
				continue;

			shadow = dev->ep_shadow[i];
			PCIE_DBG(dev,
				"PCIe Device: %02x:%02x.%01x\n",
				dev->pcidev_table[i].bdf >> 24,
				dev->pcidev_table[i].bdf >> 19 & 0x1f,
				dev->pcidev_table[i].bdf >> 16 & 0x07);
		}
		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
			val = shadow[j];
			/* PCIE_CLEAR marks an unused shadow slot */
			if (val != PCIE_CLEAR) {
				PCIE_DBG3(dev,
					"PCIe: before recovery:cfg 0x%x:0x%x\n",
					j * 4, readl_relaxed(cfg + j * 4));
				PCIE_DBG3(dev,
					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
					j, j * 4, val);
				writel_relaxed(val, cfg + j * 4);
				/* ensure changes propagated to the hardware */
				wmb();
				PCIE_DBG3(dev,
					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
					j * 4, readl_relaxed(cfg + j * 4));
			}
		}
		if (rc)
			break;

		pci_save_state(dev->pcidev_table[i].dev);
		/* each EP's config window is SZ_4K apart in dev->conf */
		cfg += SZ_4K;
	}
}
1040
1041static void msm_pcie_write_mask(void __iomem *addr,
1042 uint32_t clear_mask, uint32_t set_mask)
1043{
1044 uint32_t val;
1045
1046 val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
1047 writel_relaxed(val, addr);
1048 wmb(); /* ensure data is written to hardware register */
1049}
1050
1051static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
1052{
1053 int i, size;
1054 u32 original;
1055
1056 PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);
1057
1058 original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
1059 for (i = 1; i <= 0x1A; i++) {
1060 msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
1061 0xFF0000, i << 16);
1062 PCIE_DUMP(dev,
1063 "RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
1064 dev->rc_idx,
1065 readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
1066 readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
1067 }
1068 writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);
1069
1070 PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);
1071
1072 size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
1073 for (i = 0; i < size; i += 32) {
1074 PCIE_DUMP(dev,
1075 "RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1076 dev->rc_idx, i,
1077 readl_relaxed(dev->parf + i),
1078 readl_relaxed(dev->parf + (i + 4)),
1079 readl_relaxed(dev->parf + (i + 8)),
1080 readl_relaxed(dev->parf + (i + 12)),
1081 readl_relaxed(dev->parf + (i + 16)),
1082 readl_relaxed(dev->parf + (i + 20)),
1083 readl_relaxed(dev->parf + (i + 24)),
1084 readl_relaxed(dev->parf + (i + 28)));
1085 }
1086}
1087
1088static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
1089{
1090 PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
1091 dev->rc_idx, dev->enumerated ? "" : "not");
1092 PCIE_DBG_FS(dev, "PCIe: link is %s\n",
1093 (dev->link_status == MSM_PCIE_LINK_ENABLED)
1094 ? "enabled" : "disabled");
1095 PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
1096 dev->cfg_access ? "" : "not");
1097 PCIE_DBG_FS(dev, "use_msi is %d\n",
1098 dev->use_msi);
1099 PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
1100 dev->use_pinctrl);
1101 PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
1102 dev->use_19p2mhz_aux_clk);
1103 PCIE_DBG_FS(dev, "user_suspend is %d\n",
1104 dev->user_suspend);
1105 PCIE_DBG_FS(dev, "num_ep: %d\n",
1106 dev->num_ep);
1107 PCIE_DBG_FS(dev, "num_active_ep: %d\n",
1108 dev->num_active_ep);
1109 PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
1110 dev->pending_ep_reg ? "true" : "false");
1111 PCIE_DBG_FS(dev, "phy_len is %d",
1112 dev->phy_len);
Tony Truong349ee492014-10-01 17:35:56 -07001113 PCIE_DBG_FS(dev, "disable_pc is %d",
1114 dev->disable_pc);
1115 PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
1116 dev->l0s_supported ? "" : "not");
1117 PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
1118 dev->l1_supported ? "" : "not");
1119 PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
1120 dev->l1ss_supported ? "" : "not");
1121 PCIE_DBG_FS(dev, "common_clk_en is %d\n",
1122 dev->common_clk_en);
1123 PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
1124 dev->clk_power_manage_en);
1125 PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
1126 dev->aux_clk_sync);
1127 PCIE_DBG_FS(dev, "AER is %s enable\n",
1128 dev->aer_enable ? "" : "not");
1129 PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
1130 dev->ext_ref_clk);
Tony Truong9f2c7722017-02-28 15:02:27 -08001131 PCIE_DBG_FS(dev, "boot_option is 0x%x\n",
1132 dev->boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07001133 PCIE_DBG_FS(dev, "phy_ver is %d\n",
1134 dev->phy_ver);
1135 PCIE_DBG_FS(dev, "drv_ready is %d\n",
1136 dev->drv_ready);
1137 PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
1138 dev->linkdown_panic);
1139 PCIE_DBG_FS(dev, "the link is %s suspending\n",
1140 dev->suspending ? "" : "not");
1141 PCIE_DBG_FS(dev, "shadow is %s enabled\n",
1142 dev->shadow_en ? "" : "not");
1143 PCIE_DBG_FS(dev, "the power of RC is %s on\n",
1144 dev->power_on ? "" : "not");
1145 PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
1146 dev->msi_gicm_addr);
1147 PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
1148 dev->msi_gicm_base);
1149 PCIE_DBG_FS(dev, "bus_client: %d\n",
1150 dev->bus_client);
Tony Truong349ee492014-10-01 17:35:56 -07001151 PCIE_DBG_FS(dev, "smmu does %s exist\n",
1152 dev->smmu_exist ? "" : "not");
1153 PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
1154 dev->smmu_sid_base);
1155 PCIE_DBG_FS(dev, "n_fts: %d\n",
1156 dev->n_fts);
Tony Truong349ee492014-10-01 17:35:56 -07001157 PCIE_DBG_FS(dev, "ep_latency: %dms\n",
1158 dev->ep_latency);
Rama Krishna Phani A72fcfb22017-06-30 15:45:06 +05301159 PCIE_DBG_FS(dev, "switch_latency: %dms\n",
1160 dev->switch_latency);
Tony Truong349ee492014-10-01 17:35:56 -07001161 PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
1162 dev->wr_halt_size);
Tony Truong41e63ec2017-08-30 12:08:12 -07001163 PCIE_DBG_FS(dev, "slv_addr_space_size: 0x%x\n",
1164 dev->slv_addr_space_size);
Tony Truong349ee492014-10-01 17:35:56 -07001165 PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
1166 dev->cpl_timeout);
1167 PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
1168 dev->current_bdf);
1169 PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
1170 dev->perst_delay_us_min);
1171 PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
1172 dev->perst_delay_us_max);
1173 PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
1174 dev->tlp_rd_size);
1175 PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
1176 dev->rc_corr_counter);
1177 PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
1178 dev->rc_non_fatal_counter);
1179 PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
1180 dev->rc_fatal_counter);
1181 PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
1182 dev->ep_corr_counter);
1183 PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
1184 dev->ep_non_fatal_counter);
1185 PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
1186 dev->ep_fatal_counter);
1187 PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
1188 dev->linkdown_counter);
1189 PCIE_DBG_FS(dev, "wake_counter: %lu\n",
1190 dev->wake_counter);
Tony Truong24e02ba2017-08-30 14:53:14 -07001191 PCIE_DBG_FS(dev, "max_link_speed: 0x%x\n",
1192 dev->max_link_speed);
Tony Truong349ee492014-10-01 17:35:56 -07001193 PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
1194 dev->link_turned_on_counter);
1195 PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
1196 dev->link_turned_off_counter);
1197}
1198
1199static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
1200{
1201 int i, j;
1202 u32 val = 0;
1203 u32 *shadow;
1204
1205 for (i = 0; i < MAX_DEVICE_NUM; i++) {
1206 if (!rc && !dev->pcidev_table[i].bdf)
1207 break;
1208 if (rc) {
1209 shadow = dev->rc_shadow;
1210 } else {
1211 shadow = dev->ep_shadow[i];
1212 PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
1213 dev->pcidev_table[i].bdf >> 24,
1214 dev->pcidev_table[i].bdf >> 19 & 0x1f,
1215 dev->pcidev_table[i].bdf >> 16 & 0x07);
1216 }
1217 for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
1218 val = shadow[j];
1219 if (val != PCIE_CLEAR) {
1220 PCIE_DBG_FS(dev,
1221 "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
1222 j, j * 4, val);
1223 }
1224 }
1225 if (rc)
1226 break;
1227 }
1228}
1229
1230static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
1231 u32 testcase)
1232{
Tony Truong09223e42017-11-08 16:50:20 -08001233 u32 dbi_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
Tony Truong06ff2ed2017-01-15 19:28:13 -08001234 phys_addr_t loopback_lbar_phy =
Tony Truong09223e42017-11-08 16:50:20 -08001235 dev->res[MSM_PCIE_RES_DM_CORE].resource->start +
1236 LOOPBACK_BASE_ADDR_OFFSET;
Tony Truong06ff2ed2017-01-15 19:28:13 -08001237 static uint32_t loopback_val = 0x1;
Tony Truong09223e42017-11-08 16:50:20 -08001238 static dma_addr_t loopback_ddr_phy;
Tony Truong06ff2ed2017-01-15 19:28:13 -08001239 static uint32_t *loopback_ddr_vir;
1240 static void __iomem *loopback_lbar_vir;
Tony Truong349ee492014-10-01 17:35:56 -07001241 int ret, i;
1242 u32 base_sel_size = 0;
1243 u32 val = 0;
1244 u32 current_offset = 0;
1245 u32 ep_l1sub_ctrl1_offset = 0;
1246 u32 ep_l1sub_cap_reg1_offset = 0;
1247 u32 ep_link_ctrlstts_offset = 0;
1248 u32 ep_dev_ctrl2stts2_offset = 0;
1249
1250 if (testcase >= 5 && testcase <= 10) {
1251 current_offset =
1252 readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;
1253
1254 while (current_offset) {
1255 val = readl_relaxed(dev->conf + current_offset);
1256 if ((val & 0xff) == PCIE20_CAP_ID) {
1257 ep_link_ctrlstts_offset = current_offset +
1258 0x10;
1259 ep_dev_ctrl2stts2_offset = current_offset +
1260 0x28;
1261 break;
1262 }
1263 current_offset = (val >> 8) & 0xff;
1264 }
1265
1266 if (!ep_link_ctrlstts_offset)
1267 PCIE_DBG(dev,
1268 "RC%d endpoint does not support PCIe capability registers\n",
1269 dev->rc_idx);
1270 else
1271 PCIE_DBG(dev,
1272 "RC%d: ep_link_ctrlstts_offset: 0x%x\n",
1273 dev->rc_idx, ep_link_ctrlstts_offset);
1274 }
1275
1276 switch (testcase) {
1277 case 0: /* output status */
1278 PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
1279 dev->rc_idx);
1280 msm_pcie_show_status(dev);
1281 break;
1282 case 1: /* disable link */
1283 PCIE_DBG_FS(dev,
1284 "\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
1285 ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
1286 dev->dev, NULL,
1287 MSM_PCIE_CONFIG_NO_CFG_RESTORE);
1288 if (ret)
1289 PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
1290 __func__);
1291 else
1292 PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
1293 __func__);
1294 break;
1295 case 2: /* enable link and recover config space for RC and EP */
1296 PCIE_DBG_FS(dev,
1297 "\n\nPCIe: RC%d: enable link and recover config space\n\n",
1298 dev->rc_idx);
1299 ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
1300 dev->dev, NULL,
1301 MSM_PCIE_CONFIG_NO_CFG_RESTORE);
1302 if (ret)
1303 PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
1304 __func__);
1305 else {
1306 PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
1307 msm_pcie_recover_config(dev->dev);
1308 }
1309 break;
1310 case 3: /*
1311 * disable and enable link, recover config space for
1312 * RC and EP
1313 */
1314 PCIE_DBG_FS(dev,
1315 "\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
1316 dev->rc_idx);
1317 ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
1318 dev->dev, NULL,
1319 MSM_PCIE_CONFIG_NO_CFG_RESTORE);
1320 if (ret)
1321 PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
1322 __func__);
1323 else
1324 PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
1325 ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
1326 dev->dev, NULL,
1327 MSM_PCIE_CONFIG_NO_CFG_RESTORE);
1328 if (ret)
1329 PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
1330 __func__);
1331 else {
1332 PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
1333 msm_pcie_recover_config(dev->dev);
1334 }
1335 break;
1336 case 4: /* dump shadow registers for RC and EP */
1337 PCIE_DBG_FS(dev,
1338 "\n\nPCIe: RC%d: dumping RC shadow registers\n",
1339 dev->rc_idx);
1340 msm_pcie_shadow_dump(dev, true);
1341
1342 PCIE_DBG_FS(dev,
1343 "\n\nPCIe: RC%d: dumping EP shadow registers\n",
1344 dev->rc_idx);
1345 msm_pcie_shadow_dump(dev, false);
1346 break;
1347 case 5: /* disable L0s */
1348 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
1349 dev->rc_idx);
1350 msm_pcie_write_mask(dev->dm_core +
1351 PCIE20_CAP_LINKCTRLSTATUS,
1352 BIT(0), 0);
1353 msm_pcie_write_mask(dev->conf +
1354 ep_link_ctrlstts_offset,
1355 BIT(0), 0);
1356 if (dev->shadow_en) {
1357 dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
1358 readl_relaxed(dev->dm_core +
1359 PCIE20_CAP_LINKCTRLSTATUS);
1360 dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
1361 readl_relaxed(dev->conf +
1362 ep_link_ctrlstts_offset);
1363 }
1364 PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
1365 readl_relaxed(dev->dm_core +
1366 PCIE20_CAP_LINKCTRLSTATUS));
1367 PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
1368 readl_relaxed(dev->conf +
1369 ep_link_ctrlstts_offset));
1370 break;
1371 case 6: /* enable L0s */
1372 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
1373 dev->rc_idx);
1374 msm_pcie_write_mask(dev->dm_core +
1375 PCIE20_CAP_LINKCTRLSTATUS,
1376 0, BIT(0));
1377 msm_pcie_write_mask(dev->conf +
1378 ep_link_ctrlstts_offset,
1379 0, BIT(0));
1380 if (dev->shadow_en) {
1381 dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
1382 readl_relaxed(dev->dm_core +
1383 PCIE20_CAP_LINKCTRLSTATUS);
1384 dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
1385 readl_relaxed(dev->conf +
1386 ep_link_ctrlstts_offset);
1387 }
1388 PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
1389 readl_relaxed(dev->dm_core +
1390 PCIE20_CAP_LINKCTRLSTATUS));
1391 PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
1392 readl_relaxed(dev->conf +
1393 ep_link_ctrlstts_offset));
1394 break;
1395 case 7: /* disable L1 */
1396 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
1397 dev->rc_idx);
1398 msm_pcie_write_mask(dev->dm_core +
1399 PCIE20_CAP_LINKCTRLSTATUS,
1400 BIT(1), 0);
1401 msm_pcie_write_mask(dev->conf +
1402 ep_link_ctrlstts_offset,
1403 BIT(1), 0);
1404 if (dev->shadow_en) {
1405 dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
1406 readl_relaxed(dev->dm_core +
1407 PCIE20_CAP_LINKCTRLSTATUS);
1408 dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
1409 readl_relaxed(dev->conf +
1410 ep_link_ctrlstts_offset);
1411 }
1412 PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
1413 readl_relaxed(dev->dm_core +
1414 PCIE20_CAP_LINKCTRLSTATUS));
1415 PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
1416 readl_relaxed(dev->conf +
1417 ep_link_ctrlstts_offset));
1418 break;
1419 case 8: /* enable L1 */
1420 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
1421 dev->rc_idx);
1422 msm_pcie_write_mask(dev->dm_core +
1423 PCIE20_CAP_LINKCTRLSTATUS,
1424 0, BIT(1));
1425 msm_pcie_write_mask(dev->conf +
1426 ep_link_ctrlstts_offset,
1427 0, BIT(1));
1428 if (dev->shadow_en) {
1429 dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
1430 readl_relaxed(dev->dm_core +
1431 PCIE20_CAP_LINKCTRLSTATUS);
1432 dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
1433 readl_relaxed(dev->conf +
1434 ep_link_ctrlstts_offset);
1435 }
1436 PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
1437 readl_relaxed(dev->dm_core +
1438 PCIE20_CAP_LINKCTRLSTATUS));
1439 PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
1440 readl_relaxed(dev->conf +
1441 ep_link_ctrlstts_offset));
1442 break;
1443 case 9: /* disable L1ss */
1444 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
1445 dev->rc_idx);
1446 current_offset = PCIE_EXT_CAP_OFFSET;
1447 while (current_offset) {
1448 val = readl_relaxed(dev->conf + current_offset);
1449 if ((val & 0xffff) == L1SUB_CAP_ID) {
1450 ep_l1sub_ctrl1_offset =
1451 current_offset + 0x8;
1452 break;
1453 }
1454 current_offset = val >> 20;
1455 }
1456 if (!ep_l1sub_ctrl1_offset) {
1457 PCIE_DBG_FS(dev,
1458 "PCIe: RC%d endpoint does not support l1ss registers\n",
1459 dev->rc_idx);
1460 break;
1461 }
1462
1463 PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
1464 dev->rc_idx, ep_l1sub_ctrl1_offset);
1465
1466 msm_pcie_write_reg_field(dev->dm_core,
1467 PCIE20_L1SUB_CONTROL1,
1468 0xf, 0);
1469 msm_pcie_write_mask(dev->dm_core +
1470 PCIE20_DEVICE_CONTROL2_STATUS2,
1471 BIT(10), 0);
1472 msm_pcie_write_reg_field(dev->conf,
1473 ep_l1sub_ctrl1_offset,
1474 0xf, 0);
1475 msm_pcie_write_mask(dev->conf +
1476 ep_dev_ctrl2stts2_offset,
1477 BIT(10), 0);
1478 if (dev->shadow_en) {
1479 dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
1480 readl_relaxed(dev->dm_core +
1481 PCIE20_L1SUB_CONTROL1);
1482 dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
1483 readl_relaxed(dev->dm_core +
1484 PCIE20_DEVICE_CONTROL2_STATUS2);
1485 dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
1486 readl_relaxed(dev->conf +
1487 ep_l1sub_ctrl1_offset);
1488 dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
1489 readl_relaxed(dev->conf +
1490 ep_dev_ctrl2stts2_offset);
1491 }
1492 PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
1493 readl_relaxed(dev->dm_core +
1494 PCIE20_L1SUB_CONTROL1));
1495 PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
1496 readl_relaxed(dev->dm_core +
1497 PCIE20_DEVICE_CONTROL2_STATUS2));
1498 PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
1499 readl_relaxed(dev->conf +
1500 ep_l1sub_ctrl1_offset));
1501 PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
1502 readl_relaxed(dev->conf +
1503 ep_dev_ctrl2stts2_offset));
1504 break;
1505 case 10: /* enable L1ss */
1506 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
1507 dev->rc_idx);
1508 current_offset = PCIE_EXT_CAP_OFFSET;
1509 while (current_offset) {
1510 val = readl_relaxed(dev->conf + current_offset);
1511 if ((val & 0xffff) == L1SUB_CAP_ID) {
1512 ep_l1sub_cap_reg1_offset =
1513 current_offset + 0x4;
1514 ep_l1sub_ctrl1_offset =
1515 current_offset + 0x8;
1516 break;
1517 }
1518 current_offset = val >> 20;
1519 }
1520 if (!ep_l1sub_ctrl1_offset) {
1521 PCIE_DBG_FS(dev,
1522 "PCIe: RC%d endpoint does not support l1ss registers\n",
1523 dev->rc_idx);
1524 break;
1525 }
1526
1527 val = readl_relaxed(dev->conf +
1528 ep_l1sub_cap_reg1_offset);
1529
1530 PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
1531 val);
1532 PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
1533 dev->rc_idx, ep_l1sub_ctrl1_offset);
1534
1535 val &= 0xf;
1536
1537 msm_pcie_write_reg_field(dev->dm_core,
1538 PCIE20_L1SUB_CONTROL1,
1539 0xf, val);
1540 msm_pcie_write_mask(dev->dm_core +
1541 PCIE20_DEVICE_CONTROL2_STATUS2,
1542 0, BIT(10));
1543 msm_pcie_write_reg_field(dev->conf,
1544 ep_l1sub_ctrl1_offset,
1545 0xf, val);
1546 msm_pcie_write_mask(dev->conf +
1547 ep_dev_ctrl2stts2_offset,
1548 0, BIT(10));
1549 if (dev->shadow_en) {
1550 dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
1551 readl_relaxed(dev->dm_core +
1552 PCIE20_L1SUB_CONTROL1);
1553 dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
1554 readl_relaxed(dev->dm_core +
1555 PCIE20_DEVICE_CONTROL2_STATUS2);
1556 dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
1557 readl_relaxed(dev->conf +
1558 ep_l1sub_ctrl1_offset);
1559 dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
1560 readl_relaxed(dev->conf +
1561 ep_dev_ctrl2stts2_offset);
1562 }
1563 PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
1564 readl_relaxed(dev->dm_core +
1565 PCIE20_L1SUB_CONTROL1));
1566 PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
1567 readl_relaxed(dev->dm_core +
1568 PCIE20_DEVICE_CONTROL2_STATUS2));
1569 PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
1570 readl_relaxed(dev->conf +
1571 ep_l1sub_ctrl1_offset));
1572 PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
1573 readl_relaxed(dev->conf +
1574 ep_dev_ctrl2stts2_offset));
1575 break;
1576 case 11: /* enumerate PCIe */
1577 PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
1578 dev->rc_idx);
1579 if (dev->enumerated)
1580 PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
1581 dev->rc_idx);
1582 else {
1583 if (!msm_pcie_enumerate(dev->rc_idx))
1584 PCIE_DBG_FS(dev,
1585 "PCIe: RC%d is successfully enumerated\n",
1586 dev->rc_idx);
1587 else
1588 PCIE_DBG_FS(dev,
1589 "PCIe: RC%d enumeration failed\n",
1590 dev->rc_idx);
1591 }
1592 break;
1593 case 12: /* write a value to a register */
1594 PCIE_DBG_FS(dev,
1595 "\n\nPCIe: RC%d: writing a value to a register\n\n",
1596 dev->rc_idx);
1597
1598 if (!base_sel) {
1599 PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
1600 break;
1601 }
1602
1603 PCIE_DBG_FS(dev,
1604 "base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
1605 dev->res[base_sel - 1].name,
1606 dev->res[base_sel - 1].base,
1607 wr_offset, wr_mask, wr_value);
1608
Tony Truong95747382017-01-06 14:03:03 -08001609 base_sel_size = resource_size(dev->res[base_sel - 1].resource);
1610
1611 if (wr_offset > base_sel_size - 4 ||
1612 msm_pcie_check_align(dev, wr_offset))
1613 PCIE_DBG_FS(dev,
1614 "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
1615 dev->rc_idx, wr_offset, base_sel_size - 4);
1616 else
1617 msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
1618 wr_offset, wr_mask, wr_value);
Tony Truong349ee492014-10-01 17:35:56 -07001619
1620 break;
1621 case 13: /* dump all registers of base_sel */
1622 if (!base_sel) {
1623 PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
1624 break;
1625 } else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
1626 pcie_parf_dump(dev);
1627 break;
1628 } else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
1629 pcie_phy_dump(dev);
1630 break;
1631 } else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
1632 base_sel_size = 0x1000;
1633 } else {
1634 base_sel_size = resource_size(
1635 dev->res[base_sel - 1].resource);
1636 }
1637
1638 PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
1639 dev->res[base_sel - 1].name, dev->rc_idx);
1640
1641 for (i = 0; i < base_sel_size; i += 32) {
1642 PCIE_DBG_FS(dev,
1643 "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1644 i, readl_relaxed(dev->res[base_sel - 1].base + i),
1645 readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
1646 readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
1647 readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
1648 readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
1649 readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
1650 readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
1651 readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
1652 }
1653 break;
Tony Truong06ff2ed2017-01-15 19:28:13 -08001654 case 14:
1655 PCIE_DBG_FS(dev,
1656 "PCIe: RC%d: Allocate 4K DDR memory and map LBAR.\n",
1657 dev->rc_idx);
1658 loopback_ddr_vir = dma_alloc_coherent(&dev->pdev->dev,
1659 (SZ_1K * sizeof(*loopback_ddr_vir)),
1660 &loopback_ddr_phy, GFP_KERNEL);
1661 if (!loopback_ddr_vir) {
1662 PCIE_DBG_FS(dev,
1663 "PCIe: RC%d: failed to dma_alloc_coherent.\n",
1664 dev->rc_idx);
1665 } else {
1666 PCIE_DBG_FS(dev,
1667 "PCIe: RC%d: VIR DDR memory address: 0x%pK\n",
1668 dev->rc_idx, loopback_ddr_vir);
1669 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001670 "PCIe: RC%d: PHY DDR memory address: %pad\n",
1671 dev->rc_idx, &loopback_ddr_phy);
Tony Truong06ff2ed2017-01-15 19:28:13 -08001672 }
1673
Tony Truong09223e42017-11-08 16:50:20 -08001674 PCIE_DBG_FS(dev, "PCIe: RC%d: map LBAR: %pa\n",
1675 dev->rc_idx, &loopback_lbar_phy);
Tony Truong06ff2ed2017-01-15 19:28:13 -08001676 loopback_lbar_vir = devm_ioremap(&dev->pdev->dev,
1677 loopback_lbar_phy, SZ_4K);
1678 if (!loopback_lbar_vir) {
Tony Truong09223e42017-11-08 16:50:20 -08001679 PCIE_DBG_FS(dev, "PCIe: RC%d: failed to map %pa\n",
1680 dev->rc_idx, &loopback_lbar_phy);
Tony Truong06ff2ed2017-01-15 19:28:13 -08001681 } else {
1682 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001683 "PCIe: RC%d: successfully mapped %pa to 0x%pK\n",
1684 dev->rc_idx, &loopback_lbar_phy,
Tony Truong06ff2ed2017-01-15 19:28:13 -08001685 loopback_lbar_vir);
1686 }
1687 break;
1688 case 15:
1689 PCIE_DBG_FS(dev,
1690 "PCIe: RC%d: Release 4K DDR memory and unmap LBAR.\n",
1691 dev->rc_idx);
1692
1693 if (loopback_ddr_vir) {
1694 dma_free_coherent(&dev->pdev->dev, SZ_4K,
1695 loopback_ddr_vir, loopback_ddr_phy);
1696 loopback_ddr_vir = NULL;
1697 }
1698
1699 if (loopback_lbar_vir) {
1700 devm_iounmap(&dev->pdev->dev,
1701 loopback_lbar_vir);
1702 loopback_lbar_vir = NULL;
1703 }
1704 break;
1705 case 16:
1706 PCIE_DBG_FS(dev,
1707 "PCIe: RC%d: Print DDR and LBAR addresses.\n",
1708 dev->rc_idx);
1709
1710 if (!loopback_ddr_vir || !loopback_lbar_vir) {
1711 PCIE_DBG_FS(dev,
1712 "PCIe: RC%d: DDR or LBAR address is not mapped\n",
1713 dev->rc_idx);
1714 break;
1715 }
1716
1717 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001718 "PCIe: RC%d: PHY DDR address: %pad\n",
1719 dev->rc_idx, &loopback_ddr_phy);
Tony Truong06ff2ed2017-01-15 19:28:13 -08001720 PCIE_DBG_FS(dev,
1721 "PCIe: RC%d: VIR DDR address: 0x%pK\n",
1722 dev->rc_idx, loopback_ddr_vir);
1723 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001724 "PCIe: RC%d: PHY LBAR address: %pa\n",
1725 dev->rc_idx, &loopback_lbar_phy);
Tony Truong06ff2ed2017-01-15 19:28:13 -08001726 PCIE_DBG_FS(dev,
1727 "PCIe: RC%d: VIR LBAR address: 0x%pK\n",
1728 dev->rc_idx, loopback_lbar_vir);
1729 break;
1730 case 17:
1731 PCIE_DBG_FS(dev,
1732 "PCIe: RC%d: Configure Loopback.\n",
1733 dev->rc_idx);
1734
1735 writel_relaxed(0x10000,
1736 dev->dm_core + PCIE20_GEN3_RELATED_REG);
1737 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001738 "PCIe: RC%d: 0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001739 dev->rc_idx,
1740 dbi_base_addr + PCIE20_GEN3_RELATED_REG,
1741 readl_relaxed(dev->dm_core +
1742 PCIE20_GEN3_RELATED_REG));
1743
1744 writel_relaxed(0x80000001,
1745 dev->dm_core + PCIE20_PIPE_LOOPBACK_CONTROL);
1746 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001747 "PCIe: RC%d: 0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001748 dev->rc_idx,
1749 dbi_base_addr + PCIE20_PIPE_LOOPBACK_CONTROL,
1750 readl_relaxed(dev->dm_core +
1751 PCIE20_PIPE_LOOPBACK_CONTROL));
1752
1753 writel_relaxed(0x00010124,
1754 dev->dm_core + PCIE20_PORT_LINK_CTRL_REG);
1755 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001756 "PCIe: RC%d: 0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001757 dev->rc_idx,
1758 dbi_base_addr + PCIE20_PORT_LINK_CTRL_REG,
1759 readl_relaxed(dev->dm_core +
1760 PCIE20_PORT_LINK_CTRL_REG));
1761 break;
1762 case 18:
1763 PCIE_DBG_FS(dev, "PCIe: RC%d: Setup iATU.\n", dev->rc_idx);
1764
1765 if (!loopback_ddr_vir) {
1766 PCIE_DBG_FS(dev,
1767 "PCIe: RC%d: DDR address is not mapped.\n",
1768 dev->rc_idx);
1769 break;
1770 }
1771
1772 writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_VIEWPORT);
1773 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001774 "PCIe: RC%d: PCIE20_PLR_IATU_VIEWPORT:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001775 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_VIEWPORT,
1776 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
1777
1778 writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_CTRL1);
1779 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001780 "PCIe: RC%d: PCIE20_PLR_IATU_CTRL1:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001781 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_CTRL1,
1782 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
1783
1784 writel_relaxed(loopback_lbar_phy,
1785 dev->dm_core + PCIE20_PLR_IATU_LBAR);
1786 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001787 "PCIe: RC%d: PCIE20_PLR_IATU_LBAR:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001788 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LBAR,
1789 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
1790
1791 writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_UBAR);
1792 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001793 "PCIe: RC%d: PCIE20_PLR_IATU_UBAR:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001794 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_UBAR,
1795 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
1796
1797 writel_relaxed(loopback_lbar_phy + 0xfff,
1798 dev->dm_core + PCIE20_PLR_IATU_LAR);
1799 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001800 "PCIe: RC%d: PCIE20_PLR_IATU_LAR:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001801 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LAR,
1802 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
1803
1804 writel_relaxed(loopback_ddr_phy,
1805 dev->dm_core + PCIE20_PLR_IATU_LTAR);
1806 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001807 "PCIe: RC%d: PCIE20_PLR_IATU_LTAR:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001808 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LTAR,
1809 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
1810
1811 writel_relaxed(0, dev->dm_core + PCIE20_PLR_IATU_UTAR);
1812 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001813 "PCIe: RC%d: PCIE20_PLR_IATU_UTAR:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001814 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_UTAR,
1815 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
1816
1817 writel_relaxed(0x80000000,
1818 dev->dm_core + PCIE20_PLR_IATU_CTRL2);
1819 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001820 "PCIe: RC%d: PCIE20_PLR_IATU_CTRL2:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001821 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_CTRL2,
1822 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
1823 break;
1824 case 19:
1825 PCIE_DBG_FS(dev,
1826 "PCIe: RC%d: Read DDR values.\n",
1827 dev->rc_idx);
1828
1829 if (!loopback_ddr_vir) {
1830 PCIE_DBG_FS(dev,
1831 "PCIe: RC%d: DDR is not mapped\n",
1832 dev->rc_idx);
1833 break;
1834 }
1835
1836 for (i = 0; i < SZ_1K; i += 8) {
1837 PCIE_DBG_FS(dev,
1838 "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1839 i,
1840 loopback_ddr_vir[i],
1841 loopback_ddr_vir[i + 1],
1842 loopback_ddr_vir[i + 2],
1843 loopback_ddr_vir[i + 3],
1844 loopback_ddr_vir[i + 4],
1845 loopback_ddr_vir[i + 5],
1846 loopback_ddr_vir[i + 6],
1847 loopback_ddr_vir[i + 7]);
1848 }
1849 break;
1850 case 20:
1851 PCIE_DBG_FS(dev,
1852 "PCIe: RC%d: Read LBAR values.\n",
1853 dev->rc_idx);
1854
1855 if (!loopback_lbar_vir) {
1856 PCIE_DBG_FS(dev,
1857 "PCIe: RC%d: LBAR address is not mapped\n",
1858 dev->rc_idx);
1859 break;
1860 }
1861
1862 for (i = 0; i < SZ_4K; i += 32) {
1863 PCIE_DBG_FS(dev,
1864 "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1865 i,
1866 readl_relaxed(loopback_lbar_vir + i),
1867 readl_relaxed(loopback_lbar_vir + (i + 4)),
1868 readl_relaxed(loopback_lbar_vir + (i + 8)),
1869 readl_relaxed(loopback_lbar_vir + (i + 12)),
1870 readl_relaxed(loopback_lbar_vir + (i + 16)),
1871 readl_relaxed(loopback_lbar_vir + (i + 20)),
1872 readl_relaxed(loopback_lbar_vir + (i + 24)),
1873 readl_relaxed(loopback_lbar_vir + (i + 28)));
1874 }
1875 break;
1876 case 21:
1877 PCIE_DBG_FS(dev, "PCIe: RC%d: Write 0x%x to DDR.\n",
1878 dev->rc_idx, loopback_val);
1879
1880 if (!loopback_ddr_vir) {
1881 PCIE_DBG_FS(dev,
1882 "PCIe: RC%d: DDR address is not mapped\n",
1883 dev->rc_idx);
1884 break;
1885 }
1886
1887 memset(loopback_ddr_vir, loopback_val,
1888 (SZ_1K * sizeof(*loopback_ddr_vir)));
1889
1890 if (unlikely(loopback_val == UINT_MAX))
1891 loopback_val = 1;
1892 else
1893 loopback_val++;
1894 break;
1895 case 22:
1896 PCIE_DBG_FS(dev, "PCIe: RC%d: Write 0x%x to LBAR.\n",
1897 dev->rc_idx, loopback_val);
1898
1899 if (!loopback_lbar_vir) {
1900 PCIE_DBG_FS(dev,
1901 "PCIe: RC%d: LBAR address is not mapped\n",
1902 dev->rc_idx);
1903 break;
1904 }
1905
1906 for (i = 0; i < SZ_4K; i += 32) {
1907 writel_relaxed(loopback_val,
1908 loopback_lbar_vir + i),
1909 writel_relaxed(loopback_val,
1910 loopback_lbar_vir + (i + 4)),
1911 writel_relaxed(loopback_val,
1912 loopback_lbar_vir + (i + 8)),
1913 writel_relaxed(loopback_val,
1914 loopback_lbar_vir + (i + 12)),
1915 writel_relaxed(loopback_val,
1916 loopback_lbar_vir + (i + 16)),
1917 writel_relaxed(loopback_val,
1918 loopback_lbar_vir + (i + 20)),
1919 writel_relaxed(loopback_val,
1920 loopback_lbar_vir + (i + 24)),
1921 writel_relaxed(loopback_val,
1922 loopback_lbar_vir + (i + 28));
1923 }
1924
1925 if (unlikely(loopback_val == UINT_MAX))
1926 loopback_val = 1;
1927 else
1928 loopback_val++;
1929 break;
Tony Truong349ee492014-10-01 17:35:56 -07001930 default:
1931 PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
1932 break;
1933 }
1934}
1935
1936int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
1937 u32 offset, u32 mask, u32 value)
1938{
1939 int ret = 0;
1940 struct msm_pcie_dev_t *pdev = NULL;
1941
1942 if (!dev) {
1943 pr_err("PCIe: the input pci dev is NULL.\n");
1944 return -ENODEV;
1945 }
1946
1947 if (option == 12 || option == 13) {
1948 if (!base || base > 5) {
1949 PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
1950 PCIE_DBG_FS(pdev,
1951 "PCIe: base_sel is still 0x%x\n", base_sel);
1952 return -EINVAL;
1953 }
1954
1955 base_sel = base;
1956 PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
1957
1958 if (option == 12) {
1959 wr_offset = offset;
1960 wr_mask = mask;
1961 wr_value = value;
1962
1963 PCIE_DBG_FS(pdev,
1964 "PCIe: wr_offset is now 0x%x\n", wr_offset);
1965 PCIE_DBG_FS(pdev,
1966 "PCIe: wr_mask is now 0x%x\n", wr_mask);
1967 PCIE_DBG_FS(pdev,
1968 "PCIe: wr_value is now 0x%x\n", wr_value);
1969 }
1970 }
1971
1972 pdev = PCIE_BUS_PRIV_DATA(dev->bus);
1973 rc_sel = 1 << pdev->rc_idx;
1974
1975 msm_pcie_sel_debug_testcase(pdev, option);
1976
1977 return ret;
1978}
1979EXPORT_SYMBOL(msm_pcie_debug_info);
1980
Tony Truongbd9a3412017-02-27 18:30:13 -08001981#ifdef CONFIG_SYSFS
1982static ssize_t msm_pcie_enumerate_store(struct device *dev,
1983 struct device_attribute *attr,
1984 const char *buf, size_t count)
1985{
1986 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
1987 dev_get_drvdata(dev);
1988
1989 if (pcie_dev)
1990 msm_pcie_enumerate(pcie_dev->rc_idx);
1991
1992 return count;
1993}
1994
1995static DEVICE_ATTR(enumerate, 0200, NULL, msm_pcie_enumerate_store);
1996
1997static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
1998{
1999 int ret;
2000
2001 ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
2002 if (ret)
2003 PCIE_DBG_FS(dev,
2004 "RC%d: failed to create sysfs enumerate node\n",
2005 dev->rc_idx);
2006}
2007
2008static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
2009{
2010 if (dev->pdev)
2011 device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
2012}
2013#else
/* CONFIG_SYSFS disabled: sysfs node creation is a no-op. */
static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
{
}
2017
/* CONFIG_SYSFS disabled: sysfs node removal is a no-op. */
static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
{
}
2021#endif
2022
Tony Truong349ee492014-10-01 17:35:56 -07002023#ifdef CONFIG_DEBUG_FS
2024static struct dentry *dent_msm_pcie;
2025static struct dentry *dfile_rc_sel;
2026static struct dentry *dfile_case;
2027static struct dentry *dfile_base_sel;
2028static struct dentry *dfile_linkdown_panic;
2029static struct dentry *dfile_wr_offset;
2030static struct dentry *dfile_wr_mask;
2031static struct dentry *dfile_wr_value;
Tony Truong9f2c7722017-02-28 15:02:27 -08002032static struct dentry *dfile_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002033static struct dentry *dfile_aer_enable;
2034static struct dentry *dfile_corr_counter_limit;
2035
2036static u32 rc_sel_max;
2037
2038static ssize_t msm_pcie_cmd_debug(struct file *file,
2039 const char __user *buf,
2040 size_t count, loff_t *ppos)
2041{
2042 unsigned long ret;
2043 char str[MAX_MSG_LEN];
2044 unsigned int testcase = 0;
2045 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002046 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002047
Tony Truongfdbd5672017-01-06 16:23:14 -08002048 memset(str, 0, size);
2049 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002050 if (ret)
2051 return -EFAULT;
2052
Tony Truongfdbd5672017-01-06 16:23:14 -08002053 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002054 testcase = (testcase * 10) + (str[i] - '0');
2055
2056 if (!rc_sel)
2057 rc_sel = 1;
2058
2059 pr_alert("PCIe: TEST: %d\n", testcase);
2060
2061 for (i = 0; i < MAX_RC_NUM; i++) {
2062 if (!((rc_sel >> i) & 0x1))
2063 continue;
2064 msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
2065 }
2066
2067 return count;
2068}
2069
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002070static const struct file_operations msm_pcie_cmd_debug_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07002071 .write = msm_pcie_cmd_debug,
2072};
2073
2074static ssize_t msm_pcie_set_rc_sel(struct file *file,
2075 const char __user *buf,
2076 size_t count, loff_t *ppos)
2077{
2078 unsigned long ret;
2079 char str[MAX_MSG_LEN];
2080 int i;
2081 u32 new_rc_sel = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002082 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002083
Tony Truongfdbd5672017-01-06 16:23:14 -08002084 memset(str, 0, size);
2085 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002086 if (ret)
2087 return -EFAULT;
2088
Tony Truongfdbd5672017-01-06 16:23:14 -08002089 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002090 new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
2091
2092 if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
2093 pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
2094 pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
2095 } else {
2096 rc_sel = new_rc_sel;
2097 pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
2098 }
2099
2100 pr_alert("PCIe: the following RC(s) will be tested:\n");
2101 for (i = 0; i < MAX_RC_NUM; i++) {
2102 if (!rc_sel) {
2103 pr_alert("RC %d\n", i);
2104 break;
2105 } else if (rc_sel & (1 << i)) {
2106 pr_alert("RC %d\n", i);
2107 }
2108 }
2109
2110 return count;
2111}
2112
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002113static const struct file_operations msm_pcie_rc_sel_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07002114 .write = msm_pcie_set_rc_sel,
2115};
2116
2117static ssize_t msm_pcie_set_base_sel(struct file *file,
2118 const char __user *buf,
2119 size_t count, loff_t *ppos)
2120{
2121 unsigned long ret;
2122 char str[MAX_MSG_LEN];
2123 int i;
2124 u32 new_base_sel = 0;
2125 char *base_sel_name;
Tony Truongfdbd5672017-01-06 16:23:14 -08002126 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002127
Tony Truongfdbd5672017-01-06 16:23:14 -08002128 memset(str, 0, size);
2129 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002130 if (ret)
2131 return -EFAULT;
2132
Tony Truongfdbd5672017-01-06 16:23:14 -08002133 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002134 new_base_sel = (new_base_sel * 10) + (str[i] - '0');
2135
2136 if (!new_base_sel || new_base_sel > 5) {
2137 pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
2138 new_base_sel);
2139 pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
2140 } else {
2141 base_sel = new_base_sel;
2142 pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
2143 }
2144
2145 switch (base_sel) {
2146 case 1:
2147 base_sel_name = "PARF";
2148 break;
2149 case 2:
2150 base_sel_name = "PHY";
2151 break;
2152 case 3:
2153 base_sel_name = "RC CONFIG SPACE";
2154 break;
2155 case 4:
2156 base_sel_name = "ELBI";
2157 break;
2158 case 5:
Tony Truongf49801f2017-10-25 11:22:35 -07002159 base_sel_name = "IATU";
2160 break;
2161 case 6:
Tony Truong349ee492014-10-01 17:35:56 -07002162 base_sel_name = "EP CONFIG SPACE";
2163 break;
2164 default:
2165 base_sel_name = "INVALID";
2166 break;
2167 }
2168
2169 pr_alert("%s\n", base_sel_name);
2170
2171 return count;
2172}
2173
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002174static const struct file_operations msm_pcie_base_sel_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07002175 .write = msm_pcie_set_base_sel,
2176};
2177
2178static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
2179 const char __user *buf,
2180 size_t count, loff_t *ppos)
2181{
2182 unsigned long ret;
2183 char str[MAX_MSG_LEN];
2184 u32 new_linkdown_panic = 0;
2185 int i;
2186
2187 memset(str, 0, sizeof(str));
2188 ret = copy_from_user(str, buf, sizeof(str));
2189 if (ret)
2190 return -EFAULT;
2191
2192 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2193 new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
2194
2195 if (new_linkdown_panic <= 1) {
2196 for (i = 0; i < MAX_RC_NUM; i++) {
2197 if (!rc_sel) {
2198 msm_pcie_dev[0].linkdown_panic =
2199 new_linkdown_panic;
2200 PCIE_DBG_FS(&msm_pcie_dev[0],
2201 "PCIe: RC0: linkdown_panic is now %d\n",
2202 msm_pcie_dev[0].linkdown_panic);
2203 break;
2204 } else if (rc_sel & (1 << i)) {
2205 msm_pcie_dev[i].linkdown_panic =
2206 new_linkdown_panic;
2207 PCIE_DBG_FS(&msm_pcie_dev[i],
2208 "PCIe: RC%d: linkdown_panic is now %d\n",
2209 i, msm_pcie_dev[i].linkdown_panic);
2210 }
2211 }
2212 } else {
2213 pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
2214 new_linkdown_panic);
2215 }
2216
2217 return count;
2218}
2219
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002220static const struct file_operations msm_pcie_linkdown_panic_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07002221 .write = msm_pcie_set_linkdown_panic,
2222};
2223
2224static ssize_t msm_pcie_set_wr_offset(struct file *file,
2225 const char __user *buf,
2226 size_t count, loff_t *ppos)
2227{
2228 unsigned long ret;
2229 char str[MAX_MSG_LEN];
2230 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002231 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002232
Tony Truongfdbd5672017-01-06 16:23:14 -08002233 memset(str, 0, size);
2234 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002235 if (ret)
2236 return -EFAULT;
2237
2238 wr_offset = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002239 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002240 wr_offset = (wr_offset * 10) + (str[i] - '0');
2241
2242 pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
2243
2244 return count;
2245}
2246
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002247static const struct file_operations msm_pcie_wr_offset_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07002248 .write = msm_pcie_set_wr_offset,
2249};
2250
2251static ssize_t msm_pcie_set_wr_mask(struct file *file,
2252 const char __user *buf,
2253 size_t count, loff_t *ppos)
2254{
2255 unsigned long ret;
2256 char str[MAX_MSG_LEN];
2257 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002258 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002259
Tony Truongfdbd5672017-01-06 16:23:14 -08002260 memset(str, 0, size);
2261 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002262 if (ret)
2263 return -EFAULT;
2264
2265 wr_mask = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002266 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002267 wr_mask = (wr_mask * 10) + (str[i] - '0');
2268
2269 pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
2270
2271 return count;
2272}
2273
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002274static const struct file_operations msm_pcie_wr_mask_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07002275 .write = msm_pcie_set_wr_mask,
2276};
2277static ssize_t msm_pcie_set_wr_value(struct file *file,
2278 const char __user *buf,
2279 size_t count, loff_t *ppos)
2280{
2281 unsigned long ret;
2282 char str[MAX_MSG_LEN];
2283 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002284 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002285
Tony Truongfdbd5672017-01-06 16:23:14 -08002286 memset(str, 0, size);
2287 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002288 if (ret)
2289 return -EFAULT;
2290
2291 wr_value = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002292 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002293 wr_value = (wr_value * 10) + (str[i] - '0');
2294
2295 pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
2296
2297 return count;
2298}
2299
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002300static const struct file_operations msm_pcie_wr_value_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07002301 .write = msm_pcie_set_wr_value,
2302};
2303
Tony Truong9f2c7722017-02-28 15:02:27 -08002304static ssize_t msm_pcie_set_boot_option(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002305 const char __user *buf,
2306 size_t count, loff_t *ppos)
2307{
2308 unsigned long ret;
2309 char str[MAX_MSG_LEN];
Tony Truong9f2c7722017-02-28 15:02:27 -08002310 u32 new_boot_option = 0;
Tony Truong349ee492014-10-01 17:35:56 -07002311 int i;
2312
2313 memset(str, 0, sizeof(str));
2314 ret = copy_from_user(str, buf, sizeof(str));
2315 if (ret)
2316 return -EFAULT;
2317
2318 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong9f2c7722017-02-28 15:02:27 -08002319 new_boot_option = (new_boot_option * 10) + (str[i] - '0');
Tony Truong349ee492014-10-01 17:35:56 -07002320
Tony Truong9f2c7722017-02-28 15:02:27 -08002321 if (new_boot_option <= 1) {
Tony Truong349ee492014-10-01 17:35:56 -07002322 for (i = 0; i < MAX_RC_NUM; i++) {
2323 if (!rc_sel) {
Tony Truong9f2c7722017-02-28 15:02:27 -08002324 msm_pcie_dev[0].boot_option = new_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002325 PCIE_DBG_FS(&msm_pcie_dev[0],
Tony Truong9f2c7722017-02-28 15:02:27 -08002326 "PCIe: RC0: boot_option is now 0x%x\n",
2327 msm_pcie_dev[0].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002328 break;
2329 } else if (rc_sel & (1 << i)) {
Tony Truong9f2c7722017-02-28 15:02:27 -08002330 msm_pcie_dev[i].boot_option = new_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002331 PCIE_DBG_FS(&msm_pcie_dev[i],
Tony Truong9f2c7722017-02-28 15:02:27 -08002332 "PCIe: RC%d: boot_option is now 0x%x\n",
2333 i, msm_pcie_dev[i].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002334 }
2335 }
2336 } else {
Tony Truong9f2c7722017-02-28 15:02:27 -08002337 pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
2338 new_boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002339 }
2340
2341 return count;
2342}
2343
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002344static const struct file_operations msm_pcie_boot_option_ops = {
Tony Truong9f2c7722017-02-28 15:02:27 -08002345 .write = msm_pcie_set_boot_option,
Tony Truong349ee492014-10-01 17:35:56 -07002346};
2347
2348static ssize_t msm_pcie_set_aer_enable(struct file *file,
2349 const char __user *buf,
2350 size_t count, loff_t *ppos)
2351{
2352 unsigned long ret;
2353 char str[MAX_MSG_LEN];
2354 u32 new_aer_enable = 0;
2355 u32 temp_rc_sel;
2356 int i;
2357
2358 memset(str, 0, sizeof(str));
2359 ret = copy_from_user(str, buf, sizeof(str));
2360 if (ret)
2361 return -EFAULT;
2362
2363 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2364 new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
2365
2366 if (new_aer_enable > 1) {
2367 pr_err(
2368 "PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
2369 new_aer_enable);
2370 return count;
2371 }
2372
2373 if (rc_sel)
2374 temp_rc_sel = rc_sel;
2375 else
2376 temp_rc_sel = 0x1;
2377
2378 for (i = 0; i < MAX_RC_NUM; i++) {
2379 if (temp_rc_sel & (1 << i)) {
2380 msm_pcie_dev[i].aer_enable = new_aer_enable;
2381 PCIE_DBG_FS(&msm_pcie_dev[i],
2382 "PCIe: RC%d: aer_enable is now %d\n",
2383 i, msm_pcie_dev[i].aer_enable);
2384
2385 msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
2386 PCIE20_BRIDGE_CTRL,
2387 new_aer_enable ? 0 : BIT(16),
2388 new_aer_enable ? BIT(16) : 0);
2389
2390 PCIE_DBG_FS(&msm_pcie_dev[i],
2391 "RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
2392 readl_relaxed(msm_pcie_dev[i].dm_core +
2393 PCIE20_BRIDGE_CTRL));
2394 }
2395 }
2396
2397 return count;
2398}
2399
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002400static const struct file_operations msm_pcie_aer_enable_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07002401 .write = msm_pcie_set_aer_enable,
2402};
2403
2404static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
2405 const char __user *buf,
2406 size_t count, loff_t *ppos)
2407{
2408 unsigned long ret;
2409 char str[MAX_MSG_LEN];
2410 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002411 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002412
Tony Truongfdbd5672017-01-06 16:23:14 -08002413 memset(str, 0, size);
2414 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002415 if (ret)
2416 return -EFAULT;
2417
2418 corr_counter_limit = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002419 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002420 corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
2421
2422 pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
2423
2424 return count;
2425}
2426
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002427static const struct file_operations msm_pcie_corr_counter_limit_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07002428 .write = msm_pcie_set_corr_counter_limit,
2429};
2430
/*
 * Create the "pci-msm" debugfs directory and all control nodes.
 * Nodes are created in a fixed order; if any creation fails, the
 * chained error labels at the bottom tear down every node created so
 * far, in reverse creation order, and finally remove the directory.
 */
static void msm_pcie_debugfs_init(void)
{
	/* defaults: all RCs selectable, write mask fully open */
	rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
	wr_mask = 0xffffffff;

	dent_msm_pcie = debugfs_create_dir("pci-msm", NULL);
	if (IS_ERR(dent_msm_pcie)) {
		pr_err("PCIe: fail to create the folder for debug_fs.\n");
		return;
	}

	dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_rc_sel_ops);
	if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
		pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
		goto rc_sel_error;
	}

	dfile_case = debugfs_create_file("case", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_cmd_debug_ops);
	if (!dfile_case || IS_ERR(dfile_case)) {
		pr_err("PCIe: fail to create the file for debug_fs case.\n");
		goto case_error;
	}

	dfile_base_sel = debugfs_create_file("base_sel", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_base_sel_ops);
	if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
		pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
		goto base_sel_error;
	}

	/* note: 0644 here (root-writable only), unlike the 0664 nodes */
	dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
					dent_msm_pcie, NULL,
					&msm_pcie_linkdown_panic_ops);
	if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
		pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
		goto linkdown_panic_error;
	}

	dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_wr_offset_ops);
	if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
		goto wr_offset_error;
	}

	dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_wr_mask_ops);
	if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
		goto wr_mask_error;
	}

	dfile_wr_value = debugfs_create_file("wr_value", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_wr_value_ops);
	if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
		goto wr_value_error;
	}

	dfile_boot_option = debugfs_create_file("boot_option", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_boot_option_ops);
	if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
		pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
		goto boot_option_error;
	}

	dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_aer_enable_ops);
	if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
		pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
		goto aer_enable_error;
	}

	dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
					0664, dent_msm_pcie, NULL,
					&msm_pcie_corr_counter_limit_ops);
	if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
		pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
		goto corr_counter_limit_error;
	}
	return;

	/* unwind: remove nodes in reverse creation order */
corr_counter_limit_error:
	debugfs_remove(dfile_aer_enable);
aer_enable_error:
	debugfs_remove(dfile_boot_option);
boot_option_error:
	debugfs_remove(dfile_wr_value);
wr_value_error:
	debugfs_remove(dfile_wr_mask);
wr_mask_error:
	debugfs_remove(dfile_wr_offset);
wr_offset_error:
	debugfs_remove(dfile_linkdown_panic);
linkdown_panic_error:
	debugfs_remove(dfile_base_sel);
base_sel_error:
	debugfs_remove(dfile_case);
case_error:
	debugfs_remove(dfile_rc_sel);
rc_sel_error:
	debugfs_remove(dent_msm_pcie);
}
2544
2545static void msm_pcie_debugfs_exit(void)
2546{
2547 debugfs_remove(dfile_rc_sel);
2548 debugfs_remove(dfile_case);
2549 debugfs_remove(dfile_base_sel);
2550 debugfs_remove(dfile_linkdown_panic);
2551 debugfs_remove(dfile_wr_offset);
2552 debugfs_remove(dfile_wr_mask);
2553 debugfs_remove(dfile_wr_value);
Tony Truong9f2c7722017-02-28 15:02:27 -08002554 debugfs_remove(dfile_boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002555 debugfs_remove(dfile_aer_enable);
2556 debugfs_remove(dfile_corr_counter_limit);
2557}
2558#else
/* CONFIG_DEBUG_FS disabled: debugfs setup is a no-op. */
static void msm_pcie_debugfs_init(void)
{
}
2562
/* CONFIG_DEBUG_FS disabled: debugfs teardown is a no-op. */
static void msm_pcie_debugfs_exit(void)
{
}
2566#endif
2567
2568static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
2569{
2570 return readl_relaxed(dev->dm_core +
2571 PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
2572}
2573
/**
 * msm_pcie_iatu_config - configure outbound address translation region
 * @dev: root complex
 * @nr: region number
 * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
 * @host_addr: - region start address on host
 * @host_end: - region end address (low 32 bit) on host,
 *	upper 32 bits are same as for @host_addr
 * @target_addr: - region start address on target
 */
static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
				unsigned long host_addr, u32 host_end,
				unsigned long target_addr)
{
	/*
	 * Controllers with a dedicated iATU register block (dev->iatu)
	 * address each region directly; older ones go through the
	 * viewport registers in the DBI (dm_core) space.
	 */
	void __iomem *iatu_base = dev->iatu ? dev->iatu : dev->dm_core;

	u32 iatu_viewport_offset;
	u32 iatu_ctrl1_offset;
	u32 iatu_ctrl2_offset;
	u32 iatu_lbar_offset;
	u32 iatu_ubar_offset;
	u32 iatu_lar_offset;
	u32 iatu_ltar_offset;
	u32 iatu_utar_offset;

	if (dev->iatu) {
		/* per-region registers; viewport select not needed (0) */
		iatu_viewport_offset = 0;
		iatu_ctrl1_offset = PCIE_IATU_CTRL1(nr);
		iatu_ctrl2_offset = PCIE_IATU_CTRL2(nr);
		iatu_lbar_offset = PCIE_IATU_LBAR(nr);
		iatu_ubar_offset = PCIE_IATU_UBAR(nr);
		iatu_lar_offset = PCIE_IATU_LAR(nr);
		iatu_ltar_offset = PCIE_IATU_LTAR(nr);
		iatu_utar_offset = PCIE_IATU_UTAR(nr);
	} else {
		/* legacy viewport-indexed register set */
		iatu_viewport_offset = PCIE20_PLR_IATU_VIEWPORT;
		iatu_ctrl1_offset = PCIE20_PLR_IATU_CTRL1;
		iatu_ctrl2_offset = PCIE20_PLR_IATU_CTRL2;
		iatu_lbar_offset = PCIE20_PLR_IATU_LBAR;
		iatu_ubar_offset = PCIE20_PLR_IATU_UBAR;
		iatu_lar_offset = PCIE20_PLR_IATU_LAR;
		iatu_ltar_offset = PCIE20_PLR_IATU_LTAR;
		iatu_utar_offset = PCIE20_PLR_IATU_UTAR;
	}

	/*
	 * Mirror the programmed values into the RC shadow (viewport
	 * variant only) so the configuration can be restored later.
	 */
	if (dev->shadow_en && iatu_viewport_offset) {
		dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
			nr;
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
			type;
		dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
			lower_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
			upper_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
			host_end;
		dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
			lower_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
			upper_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
			BIT(31);
	}

	/* select region */
	if (iatu_viewport_offset) {
		writel_relaxed(nr, iatu_base + iatu_viewport_offset);
		/* ensure that hardware locks it */
		wmb();
	}

	/* switch off region before changing it */
	writel_relaxed(0, iatu_base + iatu_ctrl2_offset);
	/* and wait till it propagates to the hardware */
	wmb();

	writel_relaxed(type, iatu_base + iatu_ctrl1_offset);
	writel_relaxed(lower_32_bits(host_addr),
		iatu_base + iatu_lbar_offset);
	writel_relaxed(upper_32_bits(host_addr),
		iatu_base + iatu_ubar_offset);
	writel_relaxed(host_end, iatu_base + iatu_lar_offset);
	writel_relaxed(lower_32_bits(target_addr),
		iatu_base + iatu_ltar_offset);
	writel_relaxed(upper_32_bits(target_addr),
		iatu_base + iatu_utar_offset);
	/* ensure that changes propagated to the hardware */
	wmb();
	/* re-enable the region (BIT(31)) only after it is fully set up */
	writel_relaxed(BIT(31), iatu_base + iatu_ctrl2_offset);

	/* ensure that changes propagated to the hardware */
	wmb();

	if (dev->enumerated) {
		PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
			dev->pcidev_table[nr].bdf >> 24,
			dev->pcidev_table[nr].bdf >> 19 & 0x1f,
			dev->pcidev_table[nr].bdf >> 16 & 0x07);
		if (iatu_viewport_offset)
			PCIE_DBG2(dev, "IATU_VIEWPORT:0x%x\n",
				readl_relaxed(dev->dm_core +
					PCIE20_PLR_IATU_VIEWPORT));
		PCIE_DBG2(dev, "IATU_CTRL1:0x%x\n",
			readl_relaxed(iatu_base + iatu_ctrl1_offset));
		PCIE_DBG2(dev, "IATU_LBAR:0x%x\n",
			readl_relaxed(iatu_base + iatu_lbar_offset));
		PCIE_DBG2(dev, "IATU_UBAR:0x%x\n",
			readl_relaxed(iatu_base + iatu_ubar_offset));
		PCIE_DBG2(dev, "IATU_LAR:0x%x\n",
			readl_relaxed(iatu_base + iatu_lar_offset));
		PCIE_DBG2(dev, "IATU_LTAR:0x%x\n",
			readl_relaxed(iatu_base + iatu_ltar_offset));
		PCIE_DBG2(dev, "IATU_UTAR:0x%x\n",
			readl_relaxed(iatu_base + iatu_utar_offset));
		PCIE_DBG2(dev, "IATU_CTRL2:0x%x\n\n",
			readl_relaxed(iatu_base + iatu_ctrl2_offset));
	}
}
2692
2693/**
2694 * msm_pcie_cfg_bdf - configure for config access
2695 * @dev: root commpex
2696 * @bus: PCI bus number
2697 * @devfn: PCI dev and function number
2698 *
2699 * Remap if required region 0 for config access of proper type
2700 * (CFG0 for bus 1, CFG1 for other buses)
2701 * Cache current device bdf for speed-up
2702 */
2703static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
2704{
2705 struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
2706 u32 bdf = BDF_OFFSET(bus, devfn);
2707 u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
2708
2709 if (dev->current_bdf == bdf)
2710 return;
2711
2712 msm_pcie_iatu_config(dev, 0, type,
2713 axi_conf->start,
2714 axi_conf->start + SZ_4K - 1,
2715 bdf);
2716
2717 dev->current_bdf = bdf;
2718}
2719
2720static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
2721 u32 word_offset, u32 wr_val,
2722 u32 bdf, bool rc)
2723{
2724 int i, j;
2725 u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;
2726
2727 if (rc) {
2728 dev->rc_shadow[word_offset / 4] = wr_val;
2729 } else {
2730 for (i = 0; i < MAX_DEVICE_NUM; i++) {
2731 if (!dev->pcidev_table[i].bdf) {
2732 for (j = 0; j < max_dev; j++)
2733 if (!msm_pcie_dev_tbl[j].bdf) {
2734 msm_pcie_dev_tbl[j].bdf = bdf;
2735 break;
2736 }
2737 dev->pcidev_table[i].bdf = bdf;
2738 if ((!dev->bridge_found) && (i > 0))
2739 dev->bridge_found = true;
2740 }
2741 if (dev->pcidev_table[i].bdf == bdf) {
2742 dev->ep_shadow[i][word_offset / 4] = wr_val;
2743 break;
2744 }
2745 }
2746 }
2747}
2748
/*
 * msm_pcie_oper_conf - common implementation for config-space read/write.
 * @bus: PCI bus being accessed
 * @devfn: device/function on @bus
 * @oper: RD or WR
 * @where: byte offset into config space
 * @size: access width in bytes (1, 2 or 4)
 * @val: value read (RD) / value to write (WR)
 *
 * Rejects the access (returning all-FFs for reads) when config access is
 * disabled, the link is down, or a non-zero function on the RC bus is
 * addressed.  Sub-word writes are done as read-modify-write on the
 * containing 32-bit word.  Successful writes are mirrored into the shadow
 * registers when shadowing is enabled.
 *
 * Returns 0 on success or PCIBIOS_DEVICE_NOT_FOUND.
 */
static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
				     int where, int size, u32 *val)
{
	uint32_t word_offset, byte_offset, mask;
	uint32_t rd_val, wr_val;
	struct msm_pcie_dev_t *dev;
	void __iomem *config_base;
	bool rc = false;
	u32 rc_idx;
	int rv = 0;
	u32 bdf = BDF_OFFSET(bus->number, devfn);
	int i;

	dev = PCIE_BUS_PRIV_DATA(bus);

	if (!dev) {
		pr_err("PCIe: No device found for this bus.\n");
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto out;
	}

	rc_idx = dev->rc_idx;
	/* bus 0 is the root complex itself */
	rc = (bus->number == 0);

	spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);

	if (!dev->cfg_access) {
		PCIE_DBG3(dev,
			"Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* the RC exposes only function 0 */
	if (rc && (devfn != 0)) {
		PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
			 (oper == RD) ? "rd" : "wr", bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG3(dev,
			"Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* check if the link is up for endpoint */
	if (!rc && !msm_pcie_is_link_up(dev)) {
		PCIE_ERR(dev,
			"PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
			rc_idx, (oper == RD) ? "rd" : "wr",
			bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* before enumeration, retarget iATU region 0 at the requested BDF */
	if (!rc && !dev->enumerated)
		msm_pcie_cfg_bdf(dev, bus->number, devfn);

	/* byte-lane mask for a sub-word access within the 32-bit word */
	word_offset = where & ~0x3;
	byte_offset = where & 0x3;
	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);

	if (rc || !dev->enumerated) {
		config_base = rc ? dev->dm_core : dev->conf;
	} else {
		/* after enumeration each EP has its own mapped window */
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (dev->pcidev_table[i].bdf == bdf) {
				config_base = dev->pcidev_table[i].conf_base;
				break;
			}
		}
		if (i == MAX_DEVICE_NUM) {
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
		}
	}

	rd_val = readl_relaxed(config_base + word_offset);

	if (oper == RD) {
		*val = ((rd_val & mask) >> (8 * byte_offset));
		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
			rc_idx, bus->number, devfn, where, size, *val, rd_val);
	} else {
		/* read-modify-write: merge new bytes into the word */
		wr_val = (rd_val & ~mask) |
			((*val << (8 * byte_offset)) & mask);

		/* RC offset 0x3c: force latency-timer/cache-line bits */
		if ((bus->number == 0) && (where == 0x3c))
			wr_val = wr_val | (3 << 16);

		writel_relaxed(wr_val, config_base + word_offset);
		wmb(); /* ensure config data is written to hardware register */

		if (dev->shadow_en) {
			/* all-FFs read suggests the link dropped; don't
			 * poison the shadow with it
			 */
			if (rd_val == PCIE_LINK_DOWN &&
				(readl_relaxed(config_base) == PCIE_LINK_DOWN))
				PCIE_ERR(dev,
					"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
					rc_idx, bus->number, devfn,
					where, size);
			else
				msm_pcie_save_shadow(dev, word_offset, wr_val,
					bdf, rc);
		}

		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
			rc_idx, bus->number, devfn, where, size,
			wr_val, rd_val, *val);
	}

unlock:
	spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
out:
	return rv;
}
2876
2877static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
2878 int size, u32 *val)
2879{
2880 int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
2881
2882 if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
2883 *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
2884 PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
2885 }
2886
2887 return ret;
2888}
2889
/* Config-space write accessor registered with the PCI core. */
static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			    int where, int size, u32 val)
{
	return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
}
2895
/* pci_ops handed to the PCI core for all config accesses on this RC */
static struct pci_ops msm_pcie_ops = {
	.read = msm_pcie_rd_conf,
	.write = msm_pcie_wr_conf,
};
2900
2901static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
2902{
2903 int rc = 0, i;
2904 struct msm_pcie_gpio_info_t *info;
2905
2906 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
2907
2908 for (i = 0; i < dev->gpio_n; i++) {
2909 info = &dev->gpio[i];
2910
2911 if (!info->num)
2912 continue;
2913
2914 rc = gpio_request(info->num, info->name);
2915 if (rc) {
2916 PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
2917 dev->rc_idx, info->name, rc);
2918 break;
2919 }
2920
2921 if (info->out)
2922 rc = gpio_direction_output(info->num, info->init);
2923 else
2924 rc = gpio_direction_input(info->num);
2925 if (rc) {
2926 PCIE_ERR(dev,
2927 "PCIe: RC%d can't set direction for GPIO %s:%d\n",
2928 dev->rc_idx, info->name, rc);
2929 gpio_free(info->num);
2930 break;
2931 }
2932 }
2933
2934 if (rc)
2935 while (i--)
2936 gpio_free(dev->gpio[i].num);
2937
2938 return rc;
2939}
2940
2941static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
2942{
2943 int i;
2944
2945 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
2946
2947 for (i = 0; i < dev->gpio_n; i++)
2948 gpio_free(dev->gpio[i].num);
2949}
2950
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002951static int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
Tony Truong349ee492014-10-01 17:35:56 -07002952{
2953 int i, rc = 0;
2954 struct regulator *vreg;
2955 struct msm_pcie_vreg_info_t *info;
2956
2957 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
2958
2959 for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
2960 info = &dev->vreg[i];
2961 vreg = info->hdl;
2962
2963 if (!vreg)
2964 continue;
2965
2966 PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
2967 dev->rc_idx, info->name);
2968 if (info->max_v) {
2969 rc = regulator_set_voltage(vreg,
2970 info->min_v, info->max_v);
2971 if (rc) {
2972 PCIE_ERR(dev,
2973 "PCIe: RC%d can't set voltage for %s: %d\n",
2974 dev->rc_idx, info->name, rc);
2975 break;
2976 }
2977 }
2978
2979 if (info->opt_mode) {
2980 rc = regulator_set_load(vreg, info->opt_mode);
2981 if (rc < 0) {
2982 PCIE_ERR(dev,
2983 "PCIe: RC%d can't set mode for %s: %d\n",
2984 dev->rc_idx, info->name, rc);
2985 break;
2986 }
2987 }
2988
2989 rc = regulator_enable(vreg);
2990 if (rc) {
2991 PCIE_ERR(dev,
2992 "PCIe: RC%d can't enable regulator %s: %d\n",
2993 dev->rc_idx, info->name, rc);
2994 break;
2995 }
2996 }
2997
2998 if (rc)
2999 while (i--) {
3000 struct regulator *hdl = dev->vreg[i].hdl;
3001
3002 if (hdl) {
3003 regulator_disable(hdl);
3004 if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
3005 PCIE_DBG(dev,
3006 "RC%d: Removing %s vote.\n",
3007 dev->rc_idx,
3008 dev->vreg[i].name);
3009 regulator_set_voltage(hdl,
Tony Truongb213ac12017-04-05 15:21:20 -07003010 RPMH_REGULATOR_LEVEL_OFF,
3011 RPMH_REGULATOR_LEVEL_MAX);
Tony Truong349ee492014-10-01 17:35:56 -07003012 }
3013 }
3014
3015 }
3016
3017 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3018
3019 return rc;
3020}
3021
3022static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
3023{
3024 int i;
3025
3026 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3027
3028 for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
3029 if (dev->vreg[i].hdl) {
3030 PCIE_DBG(dev, "Vreg %s is being disabled\n",
3031 dev->vreg[i].name);
3032 regulator_disable(dev->vreg[i].hdl);
3033
3034 if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
3035 PCIE_DBG(dev,
3036 "RC%d: Removing %s vote.\n",
3037 dev->rc_idx,
3038 dev->vreg[i].name);
3039 regulator_set_voltage(dev->vreg[i].hdl,
Tony Truongb213ac12017-04-05 15:21:20 -07003040 RPMH_REGULATOR_LEVEL_OFF,
3041 RPMH_REGULATOR_LEVEL_MAX);
Tony Truong349ee492014-10-01 17:35:56 -07003042 }
3043 }
3044 }
3045
3046 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3047}
3048
3049static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
3050{
3051 int i, rc = 0;
3052 struct msm_pcie_clk_info_t *info;
3053 struct msm_pcie_reset_info_t *reset_info;
3054
3055 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3056
3057 rc = regulator_enable(dev->gdsc);
3058
3059 if (rc) {
3060 PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
3061 dev->rc_idx, dev->pdev->name);
3062 return rc;
3063 }
3064
3065 if (dev->gdsc_smmu) {
3066 rc = regulator_enable(dev->gdsc_smmu);
3067
3068 if (rc) {
3069 PCIE_ERR(dev,
3070 "PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
3071 dev->rc_idx, dev->pdev->name);
3072 return rc;
3073 }
3074 }
3075
3076 PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
3077 if (dev->bus_client) {
3078 rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
3079 if (rc) {
3080 PCIE_ERR(dev,
3081 "PCIe: fail to set bus bandwidth for RC%d:%d.\n",
3082 dev->rc_idx, rc);
3083 return rc;
3084 }
3085
3086 PCIE_DBG2(dev,
3087 "PCIe: set bus bandwidth for RC%d.\n",
3088 dev->rc_idx);
3089 }
3090
3091 for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
3092 info = &dev->clk[i];
3093
3094 if (!info->hdl)
3095 continue;
3096
3097 if (info->config_mem)
3098 msm_pcie_config_clock_mem(dev, info);
3099
3100 if (info->freq) {
3101 rc = clk_set_rate(info->hdl, info->freq);
3102 if (rc) {
3103 PCIE_ERR(dev,
3104 "PCIe: RC%d can't set rate for clk %s: %d.\n",
3105 dev->rc_idx, info->name, rc);
3106 break;
3107 }
3108
3109 PCIE_DBG2(dev,
3110 "PCIe: RC%d set rate for clk %s.\n",
3111 dev->rc_idx, info->name);
3112 }
3113
3114 rc = clk_prepare_enable(info->hdl);
3115
3116 if (rc)
3117 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
3118 dev->rc_idx, info->name);
3119 else
3120 PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
3121 info->name, dev->rc_idx);
3122 }
3123
3124 if (rc) {
3125 PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
3126 dev->rc_idx);
3127 while (i--) {
3128 struct clk *hdl = dev->clk[i].hdl;
3129
3130 if (hdl)
3131 clk_disable_unprepare(hdl);
3132 }
3133
3134 if (dev->gdsc_smmu)
3135 regulator_disable(dev->gdsc_smmu);
3136
3137 regulator_disable(dev->gdsc);
3138 }
3139
3140 for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
3141 reset_info = &dev->reset[i];
3142 if (reset_info->hdl) {
Tony Truongc21087d2017-05-01 17:58:06 -07003143 rc = reset_control_assert(reset_info->hdl);
3144 if (rc)
3145 PCIE_ERR(dev,
3146 "PCIe: RC%d failed to assert reset for %s.\n",
3147 dev->rc_idx, reset_info->name);
3148 else
3149 PCIE_DBG2(dev,
3150 "PCIe: RC%d successfully asserted reset for %s.\n",
3151 dev->rc_idx, reset_info->name);
3152
3153 /* add a 1ms delay to ensure the reset is asserted */
3154 usleep_range(1000, 1005);
3155
Tony Truong349ee492014-10-01 17:35:56 -07003156 rc = reset_control_deassert(reset_info->hdl);
3157 if (rc)
3158 PCIE_ERR(dev,
3159 "PCIe: RC%d failed to deassert reset for %s.\n",
3160 dev->rc_idx, reset_info->name);
3161 else
3162 PCIE_DBG2(dev,
3163 "PCIe: RC%d successfully deasserted reset for %s.\n",
3164 dev->rc_idx, reset_info->name);
3165 }
3166 }
3167
3168 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3169
3170 return rc;
3171}
3172
3173static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
3174{
3175 int i;
3176 int rc;
3177
3178 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3179
3180 for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
3181 if (dev->clk[i].hdl)
3182 clk_disable_unprepare(dev->clk[i].hdl);
3183
3184 if (dev->bus_client) {
3185 PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
3186 dev->rc_idx);
3187
3188 rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
3189 if (rc)
3190 PCIE_ERR(dev,
3191 "PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
3192 dev->rc_idx, rc);
3193 else
3194 PCIE_DBG(dev,
3195 "PCIe: relinquish bus bandwidth for RC%d.\n",
3196 dev->rc_idx);
3197 }
3198
3199 if (dev->gdsc_smmu)
3200 regulator_disable(dev->gdsc_smmu);
3201
3202 regulator_disable(dev->gdsc);
3203
3204 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3205}
3206
/*
 * msm_pcie_pipe_clk_init - enable the PHY pipe clocks and cycle the pipe
 * resets.
 * @dev: root complex being brought up
 *
 * Mirrors msm_pcie_clk_init() for the pipe-clock domain: set each clock's
 * DT-provided rate, enable it, and on failure disable the ones already
 * enabled.  Afterwards every pipe reset is asserted, held 1ms, and
 * deasserted.
 *
 * Returns 0 on success or the last clk/reset API error.
 */
static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct msm_pcie_clk_info_t *info;
	struct msm_pcie_reset_info_t *pipe_reset_info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		info = &dev->pipeclk[i];

		/* optional clock not present on this target */
		if (!info->hdl)
			continue;


		if (info->config_mem)
			msm_pcie_config_clock_mem(dev, info);

		if (info->freq) {
			rc = clk_set_rate(info->hdl, info->freq);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set rate for clk %s: %d.\n",
					dev->rc_idx, info->name, rc);
				break;
			}

			PCIE_DBG2(dev,
				"PCIe: RC%d set rate for clk %s: %d.\n",
				dev->rc_idx, info->name, rc);
		}

		rc = clk_prepare_enable(info->hdl);

		if (rc)
			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
				dev->rc_idx, info->name);
		else
			PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
				dev->rc_idx, info->name);
	}

	/* unwind the clocks enabled before the failure */
	if (rc) {
		PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
			dev->rc_idx);
		while (i--)
			if (dev->pipeclk[i].hdl)
				clk_disable_unprepare(dev->pipeclk[i].hdl);
	}

	/* pulse every pipe reset: assert, settle, deassert */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];
		if (pipe_reset_info->hdl) {
			rc = reset_control_assert(pipe_reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to assert pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully asserted pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);

			/* add a 1ms delay to ensure the reset is asserted */
			usleep_range(1000, 1005);

			rc = reset_control_deassert(
				pipe_reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to deassert pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully deasserted pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
3290
3291static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
3292{
3293 int i;
3294
3295 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3296
3297 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
3298 if (dev->pipeclk[i].hdl)
3299 clk_disable_unprepare(
3300 dev->pipeclk[i].hdl);
3301
3302 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3303}
3304
3305static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
3306{
3307 int i;
3308 u8 type;
3309 struct msm_pcie_device_info *dev_table = dev->pcidev_table;
3310
3311 for (i = 0; i < MAX_DEVICE_NUM; i++) {
3312 if (!dev_table[i].bdf)
3313 break;
3314
3315 type = dev_table[i].bdf >> 24 == 0x1 ?
3316 PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3317
3318 msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
3319 dev_table[i].phy_address + SZ_4K - 1,
3320 dev_table[i].bdf);
3321 }
3322}
3323
3324static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
3325{
Tony Truong349ee492014-10-01 17:35:56 -07003326 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3327
3328 /*
3329 * program and enable address translation region 0 (device config
3330 * address space); region type config;
3331 * axi config address range to device config address range
3332 */
3333 if (dev->enumerated) {
3334 msm_pcie_iatu_config_all_ep(dev);
3335 } else {
3336 dev->current_bdf = 0; /* to force IATU re-config */
3337 msm_pcie_cfg_bdf(dev, 1, 0);
3338 }
3339
3340 /* configure N_FTS */
3341 PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
3342 readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
3343 if (!dev->n_fts)
3344 msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
3345 0, BIT(15));
3346 else
3347 msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
3348 PCIE20_ACK_N_FTS,
3349 dev->n_fts << 8);
3350
3351 if (dev->shadow_en)
3352 dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
3353 readl_relaxed(dev->dm_core +
3354 PCIE20_ACK_F_ASPM_CTRL_REG);
3355
3356 PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
3357 readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
3358
3359 /* configure AUX clock frequency register for PCIe core */
3360 if (dev->use_19p2mhz_aux_clk)
3361 msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
3362 else
3363 msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);
3364
3365 /* configure the completion timeout value for PCIe core */
3366 if (dev->cpl_timeout && dev->bridge_found)
3367 msm_pcie_write_reg_field(dev->dm_core,
3368 PCIE20_DEVICE_CONTROL2_STATUS2,
3369 0xf, dev->cpl_timeout);
3370
3371 /* Enable AER on RC */
3372 if (dev->aer_enable) {
3373 msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
3374 BIT(16)|BIT(17));
3375 msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
3376 BIT(3)|BIT(2)|BIT(1)|BIT(0));
3377
3378 PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
3379 readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
3380 }
Tony Truong349ee492014-10-01 17:35:56 -07003381}
3382
Stephen Boydb5b8fc32017-06-21 08:59:11 -07003383static void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
Tony Truong349ee492014-10-01 17:35:56 -07003384{
3385 int i;
3386
3387 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3388
3389 /* program MSI controller and enable all interrupts */
3390 writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
3391 writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
3392
3393 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
3394 writel_relaxed(~0, dev->dm_core +
3395 PCIE20_MSI_CTRL_INTR_EN + (i * 12));
3396
3397 /* ensure that hardware is configured before proceeding */
3398 wmb();
3399}
3400
/*
 * msm_pcie_get_resources - gather every DT-described resource the RC needs.
 * @dev: root complex being probed
 * @pdev: platform device carrying the DT node
 *
 * Acquires, in order: clock frequencies, regulators (+ per-supply voltage
 * levels), the core and SMMU GDSCs, GPIOs, the optional PHY init sequence,
 * core and pipe clocks, controller and pipe resets, the bus-scale client,
 * memory-mapped register regions, and IRQ/MSI numbers.  Register bases are
 * then cached in the dev->parf/phy/elbi/... shortcuts.
 *
 * devm-managed resources are released automatically on failure; only
 * clkfreq is freed explicitly.
 *
 * Returns 0 on success or a negative errno (including -EPROBE_DEFER).
 */
static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
					struct platform_device *pdev)
{
	int i, len, cnt, ret = 0, size = 0;
	struct msm_pcie_vreg_info_t *vreg_info;
	struct msm_pcie_gpio_info_t *gpio_info;
	struct msm_pcie_clk_info_t *clk_info;
	struct resource *res;
	struct msm_pcie_res_info_t *res_info;
	struct msm_pcie_irq_info_t *irq_info;
	struct msm_pcie_irq_info_t *msi_info;
	struct msm_pcie_reset_info_t *reset_info;
	struct msm_pcie_reset_info_t *pipe_reset_info;
	char prop_name[MAX_PROP_SIZE];
	const __be32 *prop;
	u32 *clkfreq = NULL;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	/*
	 * NOTE(review): clkfreq holds MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK
	 * entries but the read below uses cnt from "clock-names"; a malformed
	 * DT with more names would overflow — verify the bindings bound cnt.
	 */
	cnt = of_property_count_strings((&pdev->dev)->of_node,
			"clock-names");
	if (cnt > 0) {
		clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
				sizeof(*clkfreq), GFP_KERNEL);
		if (!clkfreq) {
			PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
					dev->rc_idx);
			return -ENOMEM;
		}
		ret = of_property_read_u32_array(
			(&pdev->dev)->of_node,
			"max-clock-frequency-hz", clkfreq, cnt);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
				dev->rc_idx, ret);
			goto out;
		}
	}

	/* regulators: required ones abort the probe, optional are NULLed */
	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		vreg_info = &dev->vreg[i];
		vreg_info->hdl =
				devm_regulator_get(&pdev->dev, vreg_info->name);

		if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
			PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
				vreg_info->name);
			ret = PTR_ERR(vreg_info->hdl);
			goto out;
		}

		if (IS_ERR(vreg_info->hdl)) {
			if (vreg_info->required) {
				PCIE_DBG(dev, "Vreg %s doesn't exist\n",
					vreg_info->name);
				ret = PTR_ERR(vreg_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Optional Vreg %s doesn't exist\n",
					vreg_info->name);
				vreg_info->hdl = NULL;
			}
		} else {
			dev->vreg_n++;
			/* "qcom,<name>-voltage-level" = <max min load> */
			snprintf(prop_name, MAX_PROP_SIZE,
				"qcom,%s-voltage-level", vreg_info->name);
			prop = of_get_property((&pdev->dev)->of_node,
						prop_name, &len);
			if (!prop || (len != (3 * sizeof(__be32)))) {
				PCIE_DBG(dev, "%s %s property\n",
					prop ? "invalid format" :
					"no", prop_name);
			} else {
				vreg_info->max_v = be32_to_cpup(&prop[0]);
				vreg_info->min_v = be32_to_cpup(&prop[1]);
				vreg_info->opt_mode =
					be32_to_cpup(&prop[2]);
			}
		}
	}

	dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");

	if (IS_ERR(dev->gdsc)) {
		PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
			dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
		if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
			PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
					dev->pdev->name);
		ret = PTR_ERR(dev->gdsc);
		goto out;
	}

	/* SMMU GDSC is optional */
	dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");

	if (IS_ERR(dev->gdsc_smmu)) {
		PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
			dev->rc_idx);
		dev->gdsc_smmu = NULL;
	}

	/* GPIOs: num stays 0 for optional pins absent from DT */
	dev->gpio_n = 0;
	for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
		gpio_info = &dev->gpio[i];
		ret = of_get_named_gpio((&pdev->dev)->of_node,
					gpio_info->name, 0);
		if (ret >= 0) {
			gpio_info->num = ret;
			dev->gpio_n++;
			PCIE_DBG(dev, "GPIO num for %s is %d\n",
				gpio_info->name, gpio_info->num);
		} else {
			if (gpio_info->required) {
				PCIE_ERR(dev,
					"Could not get required GPIO %s\n",
					gpio_info->name);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Could not get optional GPIO %s\n",
					gpio_info->name);
			}
		}
		ret = 0;
	}

	/* optional PHY init sequence table (offset/value pairs) */
	of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
	if (size) {
		dev->phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->phy_sequence) {
			dev->phy_len =
				size / sizeof(*dev->phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,phy-sequence",
				(unsigned int *)dev->phy_sequence,
				size / sizeof(dev->phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* core clocks; frequencies come after the pipe clocks in clkfreq[] */
	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
		clk_info = &dev->clk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i +
					MSM_PCIE_MAX_PIPE_CLK];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* pipe clocks occupy the first MSM_PCIE_MAX_PIPE_CLK clkfreq slots */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		clk_info = &dev->pipeclk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
		reset_info = &dev->reset[i];

		reset_info->hdl = devm_reset_control_get(&pdev->dev,
						reset_info->name);

		if (IS_ERR(reset_info->hdl)) {
			if (reset_info->required) {
				PCIE_DBG(dev,
					"Reset %s isn't available:%ld\n",
					reset_info->name,
					PTR_ERR(reset_info->hdl));

				ret = PTR_ERR(reset_info->hdl);
				reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Reset %s\n",
					reset_info->name);
				reset_info->hdl = NULL;
			}
		}
	}

	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];

		pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
						pipe_reset_info->name);

		if (IS_ERR(pipe_reset_info->hdl)) {
			if (pipe_reset_info->required) {
				PCIE_DBG(dev,
					"Pipe Reset %s isn't available:%ld\n",
					pipe_reset_info->name,
					PTR_ERR(pipe_reset_info->hdl));

				ret = PTR_ERR(pipe_reset_info->hdl);
				pipe_reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
					pipe_reset_info->name);
				pipe_reset_info->hdl = NULL;
			}
		}
	}

	dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (!dev->bus_scale_table) {
		PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
			dev->rc_idx, dev->pdev->name);
		dev->bus_client = 0;
	} else {
		dev->bus_client =
			msm_bus_scale_register_client(dev->bus_scale_table);
		if (!dev->bus_client) {
			PCIE_ERR(dev,
				"PCIe: Failed to register bus client for RC%d (%s)\n",
				dev->rc_idx, dev->pdev->name);
			msm_bus_cl_clear_pdata(dev->bus_scale_table);
			ret = -ENODEV;
			goto out;
		}
	}

	/* map every named register region; missing regions only warn */
	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
		res_info = &dev->res[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							res_info->name);

		if (!res) {
			PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
				dev->rc_idx, res_info->name);
		} else {
			PCIE_DBG(dev, "start addr for %s is %pa.\n",
				res_info->name,	&res->start);

			res_info->base = devm_ioremap(&pdev->dev,
						res->start, resource_size(res));
			if (!res_info->base) {
				PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
					dev->rc_idx, res_info->name);
				ret = -ENOMEM;
				goto out;
			} else {
				res_info->resource = res;
			}
		}
	}

	for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
		irq_info = &dev->irq[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							irq_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, irq_info->name);
		} else {
			irq_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
					irq_info->num);
		}
	}

	for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
		msi_info = &dev->msi[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							msi_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, msi_info->name);
		} else {
			msi_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
					msi_info->num);
		}
	}

	/* All allocations succeeded */

	if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
		dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
	else
		dev->wake_n = 0;

	/* cache register-base shortcuts used throughout the driver */
	dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
	dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
	dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
	dev->iatu = dev->res[MSM_PCIE_RES_IATU].base;
	dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
	dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
	dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
	dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
	dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
	dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
	dev->dev_io_res->flags = IORESOURCE_IO;

out:
	kfree(clkfreq);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
3755
/*
 * msm_pcie_release_resources - drop the cached register-base shortcuts.
 * The underlying devm-managed mappings are released by the driver core.
 * NOTE(review): dev->phy is not cleared here although get_resources sets
 * it — confirm whether that is intentional.
 */
static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
{
	dev->parf = NULL;
	dev->elbi = NULL;
	dev->iatu = NULL;
	dev->dm_core = NULL;
	dev->conf = NULL;
	dev->bars = NULL;
	dev->tcsr = NULL;
	dev->dev_mem_res = NULL;
	dev->dev_io_res = NULL;
}
3768
Tony Truongc275fe02017-04-18 19:04:20 -07003769static void msm_pcie_setup_gen3(struct msm_pcie_dev_t *dev)
3770{
3771 PCIE_DBG(dev, "PCIe: RC%d: Setting up Gen3\n", dev->rc_idx);
3772
3773 msm_pcie_write_reg_field(dev->dm_core,
3774 PCIE_GEN3_L0_DRVR_CTRL0, 0x1ff00, BIT(0));
3775
3776 msm_pcie_write_reg(dev->dm_core,
3777 PCIE_GEN3_L0_BIST_ERR_CNT2_STATUS,
3778 (0x05 << 14) | (0x05 << 10) | (0x0d << 5));
3779
3780 msm_pcie_write_mask(dev->dm_core +
3781 PCIE_GEN3_L0_BIST_ERR_CNT1_STATUS, BIT(4), 0);
3782
3783 msm_pcie_write_mask(dev->dm_core +
3784 PCIE_GEN3_L0_RESET_GEN, BIT(0), 0);
3785
3786 /* configure PCIe preset */
3787 msm_pcie_write_reg(dev->dm_core,
3788 PCIE_GEN3_L0_DEBUG_BUS_STATUS4, 1);
3789 msm_pcie_write_reg(dev->dm_core,
3790 PCIE_GEN3_COM_INTEGLOOP_GAIN1_MODE0, 0x77777777);
3791 msm_pcie_write_reg(dev->dm_core,
3792 PCIE_GEN3_L0_DEBUG_BUS_STATUS4, 1);
3793
3794 msm_pcie_write_reg_field(dev->dm_core,
3795 PCIE20_CAP + PCI_EXP_LNKCTL2,
3796 PCI_EXP_LNKCAP_SLS, GEN3_SPEED);
3797}
3798
Stephen Boydb5b8fc32017-06-21 08:59:11 -07003799static int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
Tony Truong349ee492014-10-01 17:35:56 -07003800{
3801 int ret = 0;
3802 uint32_t val;
3803 long int retries = 0;
3804 int link_check_count = 0;
Tony Truong74ee0fd2017-10-06 19:37:43 -07003805 unsigned long ep_up_timeout = 0;
Tony Truong349ee492014-10-01 17:35:56 -07003806
3807 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3808
3809 mutex_lock(&dev->setup_lock);
3810
3811 if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
3812 PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
3813 dev->rc_idx);
3814 goto out;
3815 }
3816
3817 /* assert PCIe reset link to keep EP in reset */
3818
3819 PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
3820 dev->rc_idx);
3821 gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
3822 dev->gpio[MSM_PCIE_GPIO_PERST].on);
3823 usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
3824 PERST_PROPAGATION_DELAY_US_MAX);
3825
3826 /* enable power */
3827
3828 if (options & PM_VREG) {
3829 ret = msm_pcie_vreg_init(dev);
3830 if (ret)
3831 goto out;
3832 }
3833
3834 /* enable clocks */
3835 if (options & PM_CLK) {
3836 ret = msm_pcie_clk_init(dev);
3837 /* ensure that changes propagated to the hardware */
3838 wmb();
3839 if (ret)
3840 goto clk_fail;
3841 }
3842
3843 if (dev->scm_dev_id) {
3844 PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
3845 msm_pcie_restore_sec_config(dev);
3846 }
3847
Tony Truongb213ac12017-04-05 15:21:20 -07003848 /* configure PCIe to RC mode */
3849 msm_pcie_write_reg(dev->parf, PCIE20_PARF_DEVICE_TYPE, 0x4);
3850
3851 /* enable l1 mode, clear bit 5 (REQ_NOT_ENTR_L1) */
3852 if (dev->l1_supported)
3853 msm_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
3854
Tony Truong349ee492014-10-01 17:35:56 -07003855 /* enable PCIe clocks and resets */
3856 msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);
3857
3858 /* change DBI base address */
3859 writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);
3860
3861 writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);
3862
3863 msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
3864 0, BIT(4));
3865
3866 /* enable selected IRQ */
3867 if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
3868 msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);
3869
3870 msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
3871 BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
3872 BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
3873 BIT(MSM_PCIE_INT_EVT_AER_ERR) |
3874 BIT(MSM_PCIE_INT_EVT_MSI_0) |
3875 BIT(MSM_PCIE_INT_EVT_MSI_1) |
3876 BIT(MSM_PCIE_INT_EVT_MSI_2) |
3877 BIT(MSM_PCIE_INT_EVT_MSI_3) |
3878 BIT(MSM_PCIE_INT_EVT_MSI_4) |
3879 BIT(MSM_PCIE_INT_EVT_MSI_5) |
3880 BIT(MSM_PCIE_INT_EVT_MSI_6) |
3881 BIT(MSM_PCIE_INT_EVT_MSI_7));
3882
3883 PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
3884 dev->rc_idx,
3885 readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
3886 }
3887
Tony Truong41e63ec2017-08-30 12:08:12 -07003888 writel_relaxed(dev->slv_addr_space_size, dev->parf +
3889 PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
Tony Truong349ee492014-10-01 17:35:56 -07003890
3891 if (dev->use_msi) {
3892 PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
3893 val = dev->wr_halt_size ? dev->wr_halt_size :
3894 readl_relaxed(dev->parf +
3895 PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
3896
3897 msm_pcie_write_reg(dev->parf,
3898 PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
3899 BIT(31) | val);
3900
3901 PCIE_DBG(dev,
3902 "RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
3903 dev->rc_idx,
3904 readl_relaxed(dev->parf +
3905 PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
3906 }
3907
Tony Truong349ee492014-10-01 17:35:56 -07003908 /* init PCIe PHY */
Tony Truong232cf4d2017-08-22 18:28:24 -07003909 pcie_phy_init(dev);
Tony Truong349ee492014-10-01 17:35:56 -07003910
3911 if (options & PM_PIPE_CLK) {
3912 usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
3913 PHY_STABILIZATION_DELAY_US_MAX);
3914 /* Enable the pipe clock */
3915 ret = msm_pcie_pipe_clk_init(dev);
3916 /* ensure that changes propagated to the hardware */
3917 wmb();
3918 if (ret)
3919 goto link_fail;
3920 }
3921
Tony Truongc275fe02017-04-18 19:04:20 -07003922 /* check capability for max link speed */
3923 if (!dev->max_link_speed) {
3924 val = readl_relaxed(dev->dm_core + PCIE20_CAP + PCI_EXP_LNKCAP);
3925 dev->max_link_speed = val & PCI_EXP_LNKCAP_SLS;
3926 }
3927
Tony Truong349ee492014-10-01 17:35:56 -07003928 PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);
3929
3930 do {
3931 if (pcie_phy_is_ready(dev))
3932 break;
3933 retries++;
3934 usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
3935 REFCLK_STABILIZATION_DELAY_US_MAX);
3936 } while (retries < PHY_READY_TIMEOUT_COUNT);
3937
3938 PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
3939 dev->rc_idx, retries);
3940
3941 if (pcie_phy_is_ready(dev))
3942 PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
3943 else {
3944 PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
3945 dev->rc_idx);
3946 ret = -ENODEV;
3947 pcie_phy_dump(dev);
3948 goto link_fail;
3949 }
3950
Tony Truong349ee492014-10-01 17:35:56 -07003951 if (dev->ep_latency)
3952 usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);
3953
3954 if (dev->gpio[MSM_PCIE_GPIO_EP].num)
3955 gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
3956 dev->gpio[MSM_PCIE_GPIO_EP].on);
3957
3958 /* de-assert PCIe reset link to bring EP out of reset */
3959
3960 PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
3961 dev->rc_idx);
3962 gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
3963 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
3964 usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
3965
Tony Truong74ee0fd2017-10-06 19:37:43 -07003966 ep_up_timeout = jiffies + usecs_to_jiffies(EP_UP_TIMEOUT_US);
3967
Tony Truongc275fe02017-04-18 19:04:20 -07003968 /* setup Gen3 specific configurations */
3969 if (dev->max_link_speed == GEN3_SPEED)
3970 msm_pcie_setup_gen3(dev);
3971
Tony Truong349ee492014-10-01 17:35:56 -07003972 /* set max tlp read size */
3973 msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
3974 0x7000, dev->tlp_rd_size);
3975
3976 /* enable link training */
3977 msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));
3978
3979 PCIE_DBG(dev, "%s", "check if link is up\n");
3980
3981 /* Wait for up to 100ms for the link to come up */
3982 do {
3983 usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
3984 val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
Tony Truongf9f5ff02017-03-29 11:28:44 -07003985 PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
3986 dev->rc_idx, (val >> 12) & 0x3f);
Tony Truong349ee492014-10-01 17:35:56 -07003987 } while ((!(val & XMLH_LINK_UP) ||
3988 !msm_pcie_confirm_linkup(dev, false, false, NULL))
3989 && (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));
3990
3991 if ((val & XMLH_LINK_UP) &&
3992 msm_pcie_confirm_linkup(dev, false, false, NULL)) {
3993 PCIE_DBG(dev, "Link is up after %d checkings\n",
3994 link_check_count);
3995 PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
3996 } else {
3997 PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
3998 dev->rc_idx);
3999 gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
4000 dev->gpio[MSM_PCIE_GPIO_PERST].on);
4001 PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
4002 dev->rc_idx);
4003 ret = -1;
4004 goto link_fail;
4005 }
4006
Tony Truongab961342017-09-14 16:11:31 -07004007 dev->link_status = MSM_PCIE_LINK_ENABLED;
4008 dev->power_on = true;
4009 dev->suspending = false;
4010 dev->link_turned_on_counter++;
4011
Rama Krishna Phani A72fcfb22017-06-30 15:45:06 +05304012 if (dev->switch_latency) {
4013 PCIE_DBG(dev, "switch_latency: %dms\n",
4014 dev->switch_latency);
4015 if (dev->switch_latency <= SWITCH_DELAY_MAX)
4016 usleep_range(dev->switch_latency * 1000,
4017 dev->switch_latency * 1000);
4018 else
4019 msleep(dev->switch_latency);
4020 }
4021
Tony Truong349ee492014-10-01 17:35:56 -07004022 msm_pcie_config_controller(dev);
4023
Tony Truong74ee0fd2017-10-06 19:37:43 -07004024 /* check endpoint configuration space is accessible */
4025 while (time_before(jiffies, ep_up_timeout)) {
4026 if (readl_relaxed(dev->conf) != PCIE_LINK_DOWN)
4027 break;
4028 usleep_range(EP_UP_TIMEOUT_US_MIN, EP_UP_TIMEOUT_US_MAX);
4029 }
4030
4031 if (readl_relaxed(dev->conf) != PCIE_LINK_DOWN) {
4032 PCIE_DBG(dev,
4033 "PCIe: RC%d: endpoint config space is accessible\n",
4034 dev->rc_idx);
4035 } else {
4036 PCIE_ERR(dev,
4037 "PCIe: RC%d: endpoint config space is not accessible\n",
4038 dev->rc_idx);
4039 dev->link_status = MSM_PCIE_LINK_DISABLED;
4040 dev->power_on = false;
4041 dev->link_turned_off_counter++;
4042 ret = -ENODEV;
4043 goto link_fail;
4044 }
4045
Tony Truong349ee492014-10-01 17:35:56 -07004046 if (!dev->msi_gicm_addr)
4047 msm_pcie_config_msi_controller(dev);
4048
Tony Truongb1af8b62017-05-31 15:40:38 -07004049 if (dev->enumerated) {
Tony Truong7772e692017-04-13 17:03:34 -07004050 pci_walk_bus(dev->dev->bus, &msm_pcie_config_device, dev);
Tony Truongb1af8b62017-05-31 15:40:38 -07004051 msm_pcie_config_link_pm_rc(dev, dev->dev, true);
4052 }
Tony Truong7772e692017-04-13 17:03:34 -07004053
Tony Truong349ee492014-10-01 17:35:56 -07004054 goto out;
4055
4056link_fail:
Tony Truong7416d722017-09-12 16:45:18 -07004057 if (msm_pcie_keep_resources_on & BIT(dev->rc_idx))
4058 goto out;
4059
Tony Truong349ee492014-10-01 17:35:56 -07004060 if (dev->gpio[MSM_PCIE_GPIO_EP].num)
4061 gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
4062 1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
Tony Truongc275fe02017-04-18 19:04:20 -07004063
4064 if (dev->max_link_speed != GEN3_SPEED) {
4065 msm_pcie_write_reg(dev->phy,
4066 PCIE_N_SW_RESET(dev->rc_idx), 0x1);
4067 msm_pcie_write_reg(dev->phy,
4068 PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx), 0);
4069 }
Tony Truong349ee492014-10-01 17:35:56 -07004070
4071 msm_pcie_pipe_clk_deinit(dev);
4072 msm_pcie_clk_deinit(dev);
4073clk_fail:
4074 msm_pcie_vreg_deinit(dev);
4075out:
4076 mutex_unlock(&dev->setup_lock);
4077
4078 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
4079
4080 return ret;
4081}
4082
Stephen Boydb5b8fc32017-06-21 08:59:11 -07004083static void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
Tony Truong349ee492014-10-01 17:35:56 -07004084{
4085 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
4086
4087 mutex_lock(&dev->setup_lock);
4088
4089 if (!dev->power_on) {
4090 PCIE_DBG(dev,
4091 "PCIe: the link of RC%d is already power down.\n",
4092 dev->rc_idx);
4093 mutex_unlock(&dev->setup_lock);
4094 return;
4095 }
4096
4097 dev->link_status = MSM_PCIE_LINK_DISABLED;
4098 dev->power_on = false;
4099 dev->link_turned_off_counter++;
4100
4101 PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
4102 dev->rc_idx);
4103
4104 gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
4105 dev->gpio[MSM_PCIE_GPIO_PERST].on);
4106
Tony Truongc275fe02017-04-18 19:04:20 -07004107 if (dev->max_link_speed != GEN3_SPEED) {
4108 msm_pcie_write_reg(dev->phy,
4109 PCIE_N_SW_RESET(dev->rc_idx), 0x1);
4110 msm_pcie_write_reg(dev->phy,
4111 PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx), 0);
4112 }
Tony Truong349ee492014-10-01 17:35:56 -07004113
4114 if (options & PM_CLK) {
4115 msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
4116 BIT(0));
4117 msm_pcie_clk_deinit(dev);
4118 }
4119
4120 if (options & PM_VREG)
4121 msm_pcie_vreg_deinit(dev);
4122
4123 if (options & PM_PIPE_CLK)
4124 msm_pcie_pipe_clk_deinit(dev);
4125
4126 if (dev->gpio[MSM_PCIE_GPIO_EP].num)
4127 gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
4128 1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
4129
4130 mutex_unlock(&dev->setup_lock);
4131
4132 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
4133}
4134
/*
 * msm_pcie_config_ep_aer() - enable error reporting on an endpoint
 * @dev: root-complex driver state
 * @ep_dev_info: per-endpoint entry holding its mapped config-space base
 *
 * Walks the endpoint's PCI capability list looking for the PCIe
 * capability (PCIE20_CAP_ID), caches the offset of the Device
 * Control/Status register inside it, then sets bits 3:0 of Device
 * Control (the four error-reporting enables of the PCIe spec:
 * correctable / non-fatal / fatal / unsupported request).
 */
static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
		struct msm_pcie_device_info *ep_dev_info)
{
	u32 val;
	void __iomem *ep_base = ep_dev_info->conf_base;
	/* head of the capability list; only the low byte is the pointer */
	u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
						0xff;

	while (current_offset) {
		/* stop on a misaligned capability pointer */
		if (msm_pcie_check_align(dev, current_offset))
			return;

		val = readl_relaxed(ep_base + current_offset);
		if ((val & 0xff) == PCIE20_CAP_ID) {
			/* Device Control/Status is 0x8 into the PCIe cap */
			ep_dev_info->dev_ctrlstts_offset =
				current_offset + 0x8;
			break;
		}
		/* byte 1 of the capability header is the next pointer */
		current_offset = (val >> 8) & 0xff;
	}

	if (!ep_dev_info->dev_ctrlstts_offset) {
		PCIE_DBG(dev,
			"RC%d endpoint does not support PCIe cap registers\n",
			dev->rc_idx);
		return;
	}

	PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
		dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);

	/* Enable AER on EP */
	msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
				BIT(3)|BIT(2)|BIT(1)|BIT(0));

	PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
		readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
}
4173
/*
 * msm_pcie_config_device_table() - record an enumerated PCI device
 * @dev: device being visited (bus_for_each_dev callback argument)
 * @pdev: opaque pointer to the owning struct msm_pcie_dev_t
 *
 * bus_for_each_dev() callback. For each non-root-bus device it fills
 * both the global msm_pcie_dev_tbl[] and the per-RC pcidev_table[]
 * slot matching the device's BDF, programs an iATU window for its
 * config space, enables bus-mastering on bridges, counts endpoints,
 * and (if enabled) turns on endpoint AER reporting.
 *
 * Return: 0 on success or if the device is on the root bus / already
 * recorded; -ENODEV if @dev is NULL; a positive table index if either
 * table is full (NOTE(review): positive "error" returns look
 * intentional here — callers treat any non-zero value as failure).
 */
static int msm_pcie_config_device_table(struct device *dev, void *pdev)
{
	struct pci_dev *pcidev = to_pci_dev(dev);
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
	int ret = 0;
	u32 rc_idx = pcie_dev->rc_idx;
	u32 i, index;
	u32 bdf = 0;
	u8 type;
	u32 h_type;
	u32 bme;

	if (!pcidev) {
		PCIE_ERR(pcie_dev,
			"PCIe: Did not find PCI device in list for RC%d.\n",
			pcie_dev->rc_idx);
		return -ENODEV;
	}

	PCIE_DBG(pcie_dev,
		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
		pcidev->vendor, pcidev->device);

	/* nothing to record for devices on the root bus itself */
	if (!pcidev->bus->number)
		return ret;

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
	/* bus 1 is directly below the RC: use CFG0 (type 0) accesses,
	 * deeper buses need CFG1 (type 1) */
	type = pcidev->bus->number == 1 ?
		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
		if (msm_pcie_dev_tbl[i].bdf == bdf &&
			!msm_pcie_dev_tbl[i].dev) {
			/* find the matching per-RC slot for this BDF */
			for (index = 0; index < MAX_DEVICE_NUM; index++) {
				if (dev_table_t[index].bdf == bdf) {
					/* mirror the entry in both tables */
					msm_pcie_dev_tbl[i].dev = pcidev;
					msm_pcie_dev_tbl[i].domain = rc_idx;
					msm_pcie_dev_tbl[i].conf_base =
						pcie_dev->conf + index * SZ_4K;
					msm_pcie_dev_tbl[i].phy_address =
						axi_conf->start + index * SZ_4K;

					dev_table_t[index].dev = pcidev;
					dev_table_t[index].domain = rc_idx;
					dev_table_t[index].conf_base =
						pcie_dev->conf + index * SZ_4K;
					dev_table_t[index].phy_address =
						axi_conf->start + index * SZ_4K;

					/* 4K iATU window for this BDF's
					 * config space */
					msm_pcie_iatu_config(pcie_dev, index,
						type,
						dev_table_t[index].phy_address,
						dev_table_t[index].phy_address
						+ SZ_4K - 1,
						bdf);

					h_type = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_HEADER_TYPE);

					bme = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_COMMAND_STATUS);

					/* header-type bit set: bridge, so
					 * enable memory space + bus master;
					 * otherwise count it as an endpoint */
					if (h_type & (1 << 16)) {
						pci_write_config_dword(pcidev,
							PCIE20_COMMAND_STATUS,
							bme | 0x06);
					} else {
						pcie_dev->num_ep++;
						dev_table_t[index].registered =
							false;
					}

					if (pcie_dev->num_ep > 1)
						pcie_dev->pending_ep_reg = true;

					if (pcie_dev->aer_enable)
						msm_pcie_config_ep_aer(pcie_dev,
							&dev_table_t[index]);

					break;
				}
			}
			if (index == MAX_DEVICE_NUM) {
				PCIE_ERR(pcie_dev,
					"RC%d PCI device table is full.\n",
					rc_idx);
				ret = index;
			} else {
				break;
			}
		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
			pcidev == msm_pcie_dev_tbl[i].dev) {
			/* already recorded on a previous walk */
			break;
		}
	}
	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"Global PCI device table is full: %d elements.\n",
			i);
		PCIE_ERR(pcie_dev,
			"Bus number is 0x%x\nDevice number is 0x%x\n",
			pcidev->bus->number, pcidev->devfn);
		ret = i;
	}
	return ret;
}
4284
Tony Truong2a022a02017-04-13 14:04:30 -07004285static void msm_pcie_configure_sid(struct msm_pcie_dev_t *pcie_dev,
4286 struct pci_dev *dev)
Tony Truong349ee492014-10-01 17:35:56 -07004287{
Tony Truong2a022a02017-04-13 14:04:30 -07004288 u32 offset;
4289 u32 sid;
Tony Truong349ee492014-10-01 17:35:56 -07004290 u32 bdf;
Tony Truong2a022a02017-04-13 14:04:30 -07004291 int ret;
Tony Truong349ee492014-10-01 17:35:56 -07004292
Tony Truong2a022a02017-04-13 14:04:30 -07004293 ret = iommu_fwspec_get_id(&dev->dev, &sid);
4294 if (ret) {
Tony Truong349ee492014-10-01 17:35:56 -07004295 PCIE_DBG(pcie_dev,
Tony Truong2a022a02017-04-13 14:04:30 -07004296 "PCIe: RC%d: Device does not have a SID\n",
Tony Truong349ee492014-10-01 17:35:56 -07004297 pcie_dev->rc_idx);
Tony Truong2a022a02017-04-13 14:04:30 -07004298 return;
Tony Truong349ee492014-10-01 17:35:56 -07004299 }
4300
4301 PCIE_DBG(pcie_dev,
Tony Truong2a022a02017-04-13 14:04:30 -07004302 "PCIe: RC%d: Device SID: 0x%x\n",
4303 pcie_dev->rc_idx, sid);
Tony Truong349ee492014-10-01 17:35:56 -07004304
Tony Truong2a022a02017-04-13 14:04:30 -07004305 bdf = BDF_OFFSET(dev->bus->number, dev->devfn);
4306 offset = (sid - pcie_dev->smmu_sid_base) * 4;
4307
4308 if (offset >= MAX_SHORT_BDF_NUM * 4) {
4309 PCIE_ERR(pcie_dev,
4310 "PCIe: RC%d: Invalid SID offset: 0x%x. Should be less than 0x%x\n",
4311 pcie_dev->rc_idx, offset, MAX_SHORT_BDF_NUM * 4);
4312 return;
4313 }
4314
4315 msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
4316 msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_SID_OFFSET, 0);
4317 msm_pcie_write_reg(pcie_dev->parf,
4318 PCIE20_PARF_BDF_TRANSLATE_N + offset, bdf >> 16);
Tony Truong349ee492014-10-01 17:35:56 -07004319}
Tony Truong349ee492014-10-01 17:35:56 -07004320
/*
 * msm_pcie_enumerate() - bring up and enumerate a root complex
 * @rc_idx: index into msm_pcie_dev[]
 *
 * Public entry point (exported). Powers up the link via
 * msm_pcie_enable(PM_ALL), then drives the generic PCI core:
 * collects host-bridge resources from DT, creates and scans the root
 * bus, fixes up IRQs, assigns resources and adds the devices. On
 * success it also records the RC's own pci_dev, builds the device
 * table and configures link power management.
 *
 * Return: 0 on success; -EPROBE_DEFER if the RC is not probed yet;
 * negative errno on bring-up/scan failures. Idempotent: a second call
 * only logs that enumeration is already done. Serialized by
 * enumerate_lock.
 */
int msm_pcie_enumerate(u32 rc_idx)
{
	int ret = 0, bus_ret = 0, scan_ret = 0;
	struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];

	mutex_lock(&dev->enumerate_lock);

	PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);

	if (!dev->drv_ready) {
		PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
			rc_idx);
		ret = -EPROBE_DEFER;
		goto out;
	}

	if (!dev->enumerated) {
		ret = msm_pcie_enable(dev, PM_ALL);

		/* kick start ARM PCI configuration framework */
		if (!ret) {
			struct pci_dev *pcidev = NULL;
			bool found = false;
			struct pci_bus *bus;
			resource_size_t iobase = 0;
			/* vendor/device ID straight from the RC's own
			 * config space header */
			u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
			u32 vendor_id = ids & 0xffff;
			u32 device_id = (ids & 0xffff0000) >> 16;
			LIST_HEAD(res);

			PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
					vendor_id, device_id);

			ret = of_pci_get_host_bridge_resources(
						dev->pdev->dev.of_node,
						0, 0xff, &res, &iobase);
			if (ret) {
				PCIE_ERR(dev,
					"PCIe: failed to get host bridge resources for RC%d: %d\n",
					dev->rc_idx, ret);
				goto out;
			}

			bus = pci_create_root_bus(&dev->pdev->dev, 0,
						&msm_pcie_ops, dev, &res);
			if (!bus) {
				PCIE_ERR(dev,
					"PCIe: failed to create root bus for RC%d\n",
					dev->rc_idx);
				ret = -ENOMEM;
				goto out;
			}

			scan_ret = pci_scan_child_bus(bus);
			PCIE_DBG(dev,
				"PCIe: RC%d: The max subordinate bus number discovered is %d\n",
				dev->rc_idx, scan_ret);

			msm_pcie_fixup_irqs(dev);
			pci_assign_unassigned_bus_resources(bus);
			pci_bus_add_devices(bus);

			dev->enumerated = true;

			/* enable memory space + bus master on the RC */
			msm_pcie_write_mask(dev->dm_core +
				PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));

			if (dev->cpl_timeout && dev->bridge_found)
				msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0xf, dev->cpl_timeout);

			/* snapshot command/status for restore-after-resume */
			if (dev->shadow_en) {
				u32 val = readl_relaxed(dev->dm_core +
						PCIE20_COMMAND_STATUS);
				PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
					val);
				dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
			}

			/* find the pci_dev belonging to this RC by matching
			 * its bus private data */
			do {
				pcidev = pci_get_device(vendor_id,
					device_id, pcidev);
				if (pcidev && (&msm_pcie_dev[rc_idx] ==
					(struct msm_pcie_dev_t *)
					PCIE_BUS_PRIV_DATA(pcidev->bus))) {
					msm_pcie_dev[rc_idx].dev = pcidev;
					found = true;
					PCIE_DBG(&msm_pcie_dev[rc_idx],
						"PCI device is found for RC%d\n",
						rc_idx);
				}
			} while (!found && pcidev);

			if (!pcidev) {
				PCIE_ERR(dev,
					"PCIe: Did not find PCI device for RC%d.\n",
					dev->rc_idx);
				ret = -ENODEV;
				goto out;
			}

			bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
					&msm_pcie_config_device_table);

			if (bus_ret) {
				PCIE_ERR(dev,
					"PCIe: Failed to set up device table for RC%d\n",
					dev->rc_idx);
				ret = -ENODEV;
				goto out;
			}

			msm_pcie_config_link_pm_rc(dev, dev->dev, true);
		} else {
			PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
				dev->rc_idx);
		}
	} else {
		PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
			dev->rc_idx);
	}

out:
	mutex_unlock(&dev->enumerate_lock);

	return ret;
}
EXPORT_SYMBOL(msm_pcie_enumerate);
4450
4451static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
4452 enum msm_pcie_event event)
4453{
4454 if (dev->event_reg && dev->event_reg->callback &&
4455 (dev->event_reg->events & event)) {
4456 struct msm_pcie_notify *notify = &dev->event_reg->notify;
4457
4458 notify->event = event;
4459 notify->user = dev->event_reg->user;
4460 PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
4461 dev->rc_idx, event);
4462 dev->event_reg->callback(notify);
4463
4464 if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
4465 (event == MSM_PCIE_EVENT_LINKDOWN)) {
4466 dev->user_suspend = true;
4467 PCIE_DBG(dev,
4468 "PCIe: Client of RC%d will recover the link later.\n",
4469 dev->rc_idx);
4470 return;
4471 }
4472 } else {
4473 PCIE_DBG2(dev,
4474 "PCIe: Client of RC%d does not have registration for event %d\n",
4475 dev->rc_idx, event);
4476 }
4477}
4478
/*
 * handle_wake_func() - workqueue handler for an endpoint WAKE# event
 * @work: embedded handle_wake_work in struct msm_pcie_dev_t
 *
 * Scheduled from handle_wake_irq() when the RC is not yet enumerated.
 * Runs enumeration and, if the link comes up, delivers a LINKUP
 * notification to every registered client (per-endpoint registrations
 * when num_ep > 1, otherwise the single RC registration). Holds
 * recovery_lock for the whole sequence.
 */
static void handle_wake_func(struct work_struct *work)
{
	int i, ret;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
					handle_wake_work);

	PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);

	mutex_lock(&dev->recovery_lock);

	if (!dev->enumerated) {
		PCIE_DBG(dev,
			"PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
			dev->rc_idx);

		ret = msm_pcie_enumerate(dev->rc_idx);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: failed to enable RC%d upon wake request from the device.\n",
				dev->rc_idx);
			goto out;
		}

		if (dev->num_ep > 1) {
			/* multiple endpoints: notify each registration that
			 * subscribed to LINKUP */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg = dev->pcidev_table[i].event_reg;

				if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
					&& dev->event_reg &&
					dev->event_reg->callback &&
					(dev->event_reg->events &
					MSM_PCIE_EVENT_LINKUP)) {
					struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
					notify->event = MSM_PCIE_EVENT_LINKUP;
					notify->user = dev->event_reg->user;
					PCIE_DBG(dev,
						"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
						dev->rc_idx);
					dev->event_reg->callback(notify);
				}
			}
		} else {
			/* single endpoint: one registration to check */
			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
				dev->event_reg && dev->event_reg->callback &&
				(dev->event_reg->events &
				MSM_PCIE_EVENT_LINKUP)) {
				struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
				notify->event = MSM_PCIE_EVENT_LINKUP;
				notify->user = dev->event_reg->user;
				PCIE_DBG(dev,
					"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
					dev->rc_idx);
				dev->event_reg->callback(notify);
			} else {
				PCIE_DBG(dev,
					"PCIe: Client of RC%d does not have registration for linkup event.\n",
					dev->rc_idx);
			}
		}
		goto out;
	} else {
		PCIE_ERR(dev,
			"PCIe: The enumeration for RC%d has already been done.\n",
			dev->rc_idx);
		goto out;
	}

out:
	mutex_unlock(&dev->recovery_lock);
}
4551
4552static irqreturn_t handle_aer_irq(int irq, void *data)
4553{
4554 struct msm_pcie_dev_t *dev = data;
4555
4556 int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
4557 int ep_corr_val = 0, ep_uncorr_val = 0;
4558 int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
4559 u32 ep_dev_ctrlstts_offset = 0;
4560 int i, j, ep_src_bdf = 0;
4561 void __iomem *ep_base = NULL;
4562 unsigned long irqsave_flags;
4563
4564 PCIE_DBG2(dev,
4565 "AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
4566 dev->rc_idx, irq, dev->rc_corr_counter,
4567 dev->rc_non_fatal_counter, dev->rc_fatal_counter,
4568 dev->ep_corr_counter, dev->ep_non_fatal_counter,
4569 dev->ep_fatal_counter);
4570
4571 spin_lock_irqsave(&dev->aer_lock, irqsave_flags);
4572
4573 if (dev->suspending) {
4574 PCIE_DBG2(dev,
4575 "PCIe: RC%d is currently suspending.\n",
4576 dev->rc_idx);
4577 spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
4578 return IRQ_HANDLED;
4579 }
4580
4581 uncorr_val = readl_relaxed(dev->dm_core +
4582 PCIE20_AER_UNCORR_ERR_STATUS_REG);
4583 corr_val = readl_relaxed(dev->dm_core +
4584 PCIE20_AER_CORR_ERR_STATUS_REG);
4585 rc_err_status = readl_relaxed(dev->dm_core +
4586 PCIE20_AER_ROOT_ERR_STATUS_REG);
4587 rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
4588 PCIE20_CAP_DEVCTRLSTATUS);
4589
4590 if (uncorr_val)
4591 PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
4592 uncorr_val);
4593 if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
4594 PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
4595 corr_val);
4596
4597 if ((rc_dev_ctrlstts >> 18) & 0x1)
4598 dev->rc_fatal_counter++;
4599 if ((rc_dev_ctrlstts >> 17) & 0x1)
4600 dev->rc_non_fatal_counter++;
4601 if ((rc_dev_ctrlstts >> 16) & 0x1)
4602 dev->rc_corr_counter++;
4603
4604 msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
4605 BIT(18)|BIT(17)|BIT(16));
4606
4607 if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
4608 PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
4609 goto out;
4610 }
4611
4612 for (i = 0; i < 2; i++) {
4613 if (i)
4614 ep_src_bdf = readl_relaxed(dev->dm_core +
4615 PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
4616 else
4617 ep_src_bdf = (readl_relaxed(dev->dm_core +
4618 PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;
4619
4620 if (!ep_src_bdf)
4621 continue;
4622
4623 for (j = 0; j < MAX_DEVICE_NUM; j++) {
4624 if (ep_src_bdf == dev->pcidev_table[j].bdf) {
4625 PCIE_DBG2(dev,
4626 "PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
4627 i ? "Uncorrectable" : "Correctable",
4628 dev->pcidev_table[j].bdf >> 24,
4629 dev->pcidev_table[j].bdf >> 19 & 0x1f,
4630 dev->pcidev_table[j].bdf >> 16 & 0x07);
4631 ep_base = dev->pcidev_table[j].conf_base;
4632 ep_dev_ctrlstts_offset = dev->
4633 pcidev_table[j].dev_ctrlstts_offset;
4634 break;
4635 }
4636 }
4637
4638 if (!ep_base) {
4639 PCIE_ERR(dev,
4640 "PCIe: RC%d no endpoint found for reported error\n",
4641 dev->rc_idx);
4642 goto out;
4643 }
4644
4645 ep_uncorr_val = readl_relaxed(ep_base +
4646 PCIE20_AER_UNCORR_ERR_STATUS_REG);
4647 ep_corr_val = readl_relaxed(ep_base +
4648 PCIE20_AER_CORR_ERR_STATUS_REG);
4649 ep_dev_ctrlstts = readl_relaxed(ep_base +
4650 ep_dev_ctrlstts_offset);
4651
4652 if (ep_uncorr_val)
4653 PCIE_DBG(dev,
4654 "EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
4655 ep_uncorr_val);
4656 if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
4657 PCIE_DBG(dev,
4658 "EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
4659 ep_corr_val);
4660
4661 if ((ep_dev_ctrlstts >> 18) & 0x1)
4662 dev->ep_fatal_counter++;
4663 if ((ep_dev_ctrlstts >> 17) & 0x1)
4664 dev->ep_non_fatal_counter++;
4665 if ((ep_dev_ctrlstts >> 16) & 0x1)
4666 dev->ep_corr_counter++;
4667
4668 msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
4669 BIT(18)|BIT(17)|BIT(16));
4670
4671 msm_pcie_write_reg_field(ep_base,
4672 PCIE20_AER_UNCORR_ERR_STATUS_REG,
4673 0x3fff031, 0x3fff031);
4674 msm_pcie_write_reg_field(ep_base,
4675 PCIE20_AER_CORR_ERR_STATUS_REG,
4676 0xf1c1, 0xf1c1);
4677 }
4678out:
4679 if (((dev->rc_corr_counter < corr_counter_limit) &&
4680 (dev->ep_corr_counter < corr_counter_limit)) ||
4681 uncorr_val || ep_uncorr_val)
4682 PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
4683 rc_err_status);
4684 msm_pcie_write_reg_field(dev->dm_core,
4685 PCIE20_AER_UNCORR_ERR_STATUS_REG,
4686 0x3fff031, 0x3fff031);
4687 msm_pcie_write_reg_field(dev->dm_core,
4688 PCIE20_AER_CORR_ERR_STATUS_REG,
4689 0xf1c1, 0xf1c1);
4690 msm_pcie_write_reg_field(dev->dm_core,
4691 PCIE20_AER_ROOT_ERR_STATUS_REG,
4692 0x7f, 0x7f);
4693
4694 spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
4695 return IRQ_HANDLED;
4696}
4697
/*
 * handle_wake_irq() - endpoint WAKE# interrupt handler
 * @irq: interrupt number
 * @data: struct msm_pcie_dev_t for the root complex
 *
 * If the RC has not been enumerated (and wake-triggered enumeration
 * is not disabled via boot_option), defers enumeration to the
 * handle_wake_work workqueue. Otherwise pulses the wakeup source and
 * delivers a WAKEUP notification to the registered client(s).
 *
 * Return: IRQ_HANDLED always.
 */
static irqreturn_t handle_wake_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);

	dev->wake_counter++;
	PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
			dev->wake_counter, dev->rc_idx);

	PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
		dev->rc_idx);

	if (!dev->enumerated && !(dev->boot_option &
		MSM_PCIE_NO_WAKE_ENUMERATION)) {
		/* enumeration sleeps; push it to process context */
		PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
		schedule_work(&dev->handle_wake_work);
	} else {
		PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
		/* pulse the wakeup source to nudge the PM core */
		__pm_stay_awake(&dev->ws);
		__pm_relax(&dev->ws);

		if (dev->num_ep > 1) {
			/* notify every per-endpoint registration */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_WAKEUP);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
		}
	}

	spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);

	return IRQ_HANDLED;
}
4738
/*
 * handle_linkdown_irq() - PCIe link-down interrupt handler
 * @irq: interrupt number
 * @data: struct msm_pcie_dev_t for the root complex
 *
 * Ignores the event while the link is not enabled or the RC is
 * suspending. Otherwise marks the link disabled, disables register
 * shadowing, optionally panics (debug knob), asserts PERST (unless
 * the keep-resources-on debug mask is set for this RC) and delivers a
 * LINKDOWN notification to the registered client(s).
 *
 * Return: IRQ_HANDLED always.
 */
static irqreturn_t handle_linkdown_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);

	dev->linkdown_counter++;

	PCIE_DBG(dev,
		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
		dev->linkdown_counter, dev->rc_idx);

	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG(dev,
			"PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
			dev->rc_idx);
	} else if (dev->suspending) {
		PCIE_DBG(dev,
			"PCIe:the link of RC%d is suspending.\n",
			dev->rc_idx);
	} else {
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		/* config-space shadow is stale once the link drops */
		dev->shadow_en = false;

		if (dev->linkdown_panic)
			panic("User has chosen to panic on linkdown\n");

		/* assert PERST */
		if (!(msm_pcie_keep_resources_on & BIT(dev->rc_idx)))
			gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
					dev->gpio[MSM_PCIE_GPIO_PERST].on);

		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

		if (dev->num_ep > 1) {
			/* notify every per-endpoint registration */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_LINKDOWN);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
		}
	}

	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);

	return IRQ_HANDLED;
}
4791
/*
 * handle_msi_irq() - demultiplex MSI interrupts from the controller
 * @irq: interrupt number
 * @data: struct msm_pcie_dev_t for the root complex
 *
 * Scans each of the PCIE20_MSI_CTRL_MAX MSI status banks (12 bytes
 * apart), acknowledges every pending bit by writing it back, and
 * forwards the corresponding virtual IRQ (bank*32 + bit) through the
 * MSI irq_domain. Re-reads the status after each ack so MSIs that
 * arrive mid-scan are not lost.
 *
 * Return: IRQ_HANDLED always.
 */
static irqreturn_t handle_msi_irq(int irq, void *data)
{
	int i, j;
	unsigned long val;
	struct msm_pcie_dev_t *dev = data;
	void __iomem *ctrl_status;

	PCIE_DUMP(dev, "irq: %d\n", irq);

	/*
	 * check for set bits, clear it by setting that bit
	 * and trigger corresponding irq
	 */
	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
		ctrl_status = dev->dm_core +
				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);

		val = readl_relaxed(ctrl_status);
		while (val) {
			j = find_first_bit(&val, 32);
			/* ack before dispatch: write-1-to-clear */
			writel_relaxed(BIT(j), ctrl_status);
			/* ensure that interrupt is cleared (acked) */
			wmb();
			generic_handle_irq(
			   irq_find_mapping(dev->irq_domain, (j + (32*i)))
			   );
			val = readl_relaxed(ctrl_status);
		}
	}

	return IRQ_HANDLED;
}
4824
/*
 * handle_global_irq() - dispatch the controller's combined interrupt
 * @irq: interrupt number
 * @data: struct msm_pcie_dev_t for the root complex
 *
 * Reads the pending-and-enabled event bits (INT_ALL_STATUS masked by
 * INT_ALL_MASK), acknowledges them via INT_ALL_CLEAR, then fans each
 * event out to its specific handler: link-down and the two AER event
 * sources; anything else is only logged.
 *
 * Return: IRQ_HANDLED always.
 */
static irqreturn_t handle_global_irq(int irq, void *data)
{
	int i;
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	u32 status = 0;

	spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);

	/* only consider events that are both pending and unmasked */
	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);

	/* ack everything we are about to handle */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);

	PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
		dev->rc_idx, irq, status);

	for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
		if (status & BIT(i)) {
			switch (i) {
			case MSM_PCIE_INT_EVT_LINK_DOWN:
				PCIE_DBG(dev,
					"PCIe: RC%d: handle linkdown event.\n",
					dev->rc_idx);
				handle_linkdown_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_LEGACY:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER legacy event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_ERR:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			default:
				PCIE_DUMP(dev,
					"PCIe: RC%d: Unexpected event %d is caught!\n",
					dev->rc_idx, i);
			}
		}
	}

	spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);

	return IRQ_HANDLED;
}
4875
/*
 * Undo the IOMMU mapping created for the QGIC MSI doorbell address.
 *
 * If the endpoint has no IOMMU domain, or its domain is in stage-1
 * bypass, nothing was mapped and this is a no-op. Otherwise unmap one
 * page at the IOVA derived from the PCIe DBI base (the same IOVA chosen
 * by msm_pcie_map_qgic_addr() for the non-fastmap case).
 */
static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
					struct pci_dev *pdev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
	int bypass_en = 0;

	if (!domain) {
		PCIE_DBG(dev,
			"PCIe: RC%d: client does not have an iommu domain\n",
			dev->rc_idx);
		return;
	}

	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
	if (!bypass_en) {
		int ret;
		phys_addr_t pcie_base_addr =
			dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
		dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);

		/* iommu_unmap() returns the number of bytes unmapped */
		ret = iommu_unmap(domain, iova, PAGE_SIZE);
		if (ret != PAGE_SIZE)
			PCIE_ERR(dev,
				"PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
				dev->rc_idx, ret);
	}
}
4903
/*
 * Release one MSI vector previously handed out by this driver.
 *
 * Works for both MSI schemes:
 *  - QGIC-based (dev->msi_gicm_addr set): the vector index is the
 *    offset from the first irq of the block; the QGIC doorbell IOMMU
 *    mapping is torn down when the last vector of the block is freed.
 *  - Default virtual-domain MSI: the index is the offset from the
 *    domain's hwirq-0 mapping.
 * Finally the vector's slot in msi_irq_in_use is cleared.
 *
 * @pdev may be NULL (legacy arch_teardown_msi_irq path); it is then
 * recovered from the irq chip data set at allocation time.
 */
static void msm_pcie_destroy_irq(unsigned int irq, struct pci_dev *pdev)
{
	int pos;
	struct msi_desc *entry = irq_get_msi_desc(irq);
	struct msi_desc *firstentry;
	struct msm_pcie_dev_t *dev;
	u32 nvec;
	int firstirq;

	if (!pdev)
		pdev = irq_get_chip_data(irq);

	if (!pdev) {
		pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
		return;
	}

	dev = PCIE_BUS_PRIV_DATA(pdev->bus);
	if (!dev) {
		pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
		return;
	}

	if (!entry) {
		PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
			dev->rc_idx, irq);
		return;
	}

	firstentry = first_pci_msi_entry(pdev);
	if (!firstentry) {
		PCIE_ERR(dev,
			"PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
			dev->rc_idx, irq);
		return;
	}

	firstirq = firstentry->irq;
	/* number of vectors in this MSI block (power of two) */
	nvec = (1 << entry->msi_attrib.multiple);

	if (dev->msi_gicm_addr) {
		PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);

		/* irq must fall inside [firstirq, firstirq + nvec - 1] */
		if (irq < firstirq || irq > firstirq + nvec - 1) {
			PCIE_ERR(dev,
				"Could not find irq: %d in RC%d MSI table\n",
				irq, dev->rc_idx);
			return;
		}
		/* last vector of the block: drop the doorbell mapping */
		if (irq == firstirq + nvec - 1)
			msm_pcie_unmap_qgic_addr(dev, pdev);
		pos = irq - firstirq;
	} else {
		PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
		pos = irq - irq_find_mapping(dev->irq_domain, 0);
	}

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
		pos, *dev->msi_irq_in_use);
	/* mark the vector slot free for future allocations */
	clear_bit(pos, dev->msi_irq_in_use);
	PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
		pos, *dev->msi_irq_in_use);
}
4969
/* hookup to linux pci msi framework */
/* Single-vector teardown hook; pci_dev is recovered from irq chip data. */
void arch_teardown_msi_irq(unsigned int irq)
{
	PCIE_GEN_DBG("irq %d deallocated\n", irq);
	msm_pcie_destroy_irq(irq, NULL);
}
4976
/*
 * Multi-vector teardown hook: walk every MSI descriptor of the device
 * and destroy each allocated vector in its block. Also clears use_msi
 * so the RC stops advertising MSI usage.
 */
void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
		pcie_dev->rc_idx, dev->vendor, dev->device);

	pcie_dev->use_msi = false;

	list_for_each_entry(entry, &dev->dev.msi_list, list) {
		int i, nvec;

		/* irq == 0 means this descriptor was never set up */
		if (entry->irq == 0)
			continue;
		nvec = 1 << entry->msi_attrib.multiple;
		for (i = 0; i < nvec; i++)
			msm_pcie_destroy_irq(entry->irq + i, dev);
	}
}
4997
/* No-op ack callback: MSIs are edge-type and need no per-irq ack here. */
static void msm_pcie_msi_nop(struct irq_data *d)
{
}
5001
/* irq_chip backing the virtual MSI domain: ack is a no-op, mask/unmask
 * delegate to the generic MSI mask helpers.
 */
static struct irq_chip pcie_msi_chip = {
	.name = "msm-pcie-msi",
	.irq_ack = msm_pcie_msi_nop,
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
5010
5011static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
5012{
5013 int irq, pos;
5014
5015 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5016
5017again:
5018 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5019
5020 if (pos >= PCIE_MSI_NR_IRQS)
5021 return -ENOSPC;
5022
5023 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5024
5025 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5026 goto again;
5027 else
5028 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5029
5030 irq = irq_create_mapping(dev->irq_domain, pos);
5031 if (!irq)
5032 return -EINVAL;
5033
5034 return irq;
5035}
5036
/*
 * Set up one MSI vector using the driver's virtual irq domain.
 *
 * Allocates a vector, binds the pci_dev as chip data (used for teardown
 * when the framework passes only the irq), attaches the msi_desc, and
 * programs the MSI message: the doorbell is the fixed MSM_PCIE_MSI_PHY
 * address and the data is the vector's offset within the domain.
 *
 * Return: 0 on success, negative errno from vector allocation otherwise.
 */
static int arch_setup_msi_irq_default(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	irq = msm_pcie_create_irq(dev);

	PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);

	if (irq < 0)
		return irq;

	PCIE_DBG(dev, "irq %d allocated\n", irq);

	/* chip data lets msm_pcie_destroy_irq() recover the pci_dev */
	irq_set_chip_data(irq, pdev);
	irq_set_msi_desc(irq, desc);

	/* write msi vector and data */
	msg.address_hi = 0;
	msg.address_lo = MSM_PCIE_MSI_PHY;
	msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
	write_msi_msg(irq, &msg);

	return 0;
}
5066
5067static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
5068{
5069 int irq, pos;
5070
5071 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5072
5073again:
5074 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
5075
5076 if (pos >= PCIE_MSI_NR_IRQS)
5077 return -ENOSPC;
5078
5079 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
5080
5081 if (test_and_set_bit(pos, dev->msi_irq_in_use))
5082 goto again;
5083 else
5084 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
5085
5086 if (pos >= MSM_PCIE_MAX_MSI) {
5087 PCIE_ERR(dev,
5088 "PCIe: RC%d: pos %d is not less than %d\n",
5089 dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
5090 return MSM_PCIE_ERROR;
5091 }
5092
5093 irq = dev->msi[pos].num;
5094 if (!irq) {
5095 PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
5096 dev->rc_idx);
5097 return -EINVAL;
5098 }
5099
5100 return irq;
5101}
5102
/*
 * Build the MSI message address for a QGIC-backed vector, creating an
 * IOMMU mapping for the doorbell when the client sits behind stage-1
 * translation.
 *
 * - No IOMMU domain, or stage-1 bypass: the physical msi_gicm_addr is
 *   used directly and no mapping is made.
 * - Fastmap domain: the client's aperture start is used as the IOVA.
 * - Otherwise: the PCIe DBI base (rounded down to a page) is used as
 *   the IOVA, an address clients cannot themselves map, avoiding
 *   conflicts with their own mappings.
 *
 * Return: 0 on success, -EIO if the DMA mapping of the doorbell fails.
 */
static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
					struct pci_dev *pdev,
					struct msi_msg *msg)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
	struct iommu_domain_geometry geometry;
	int fastmap_en = 0, bypass_en = 0;
	dma_addr_t iova, addr;

	msg->address_hi = 0;
	msg->address_lo = dev->msi_gicm_addr;

	if (!domain) {
		PCIE_DBG(dev,
			"PCIe: RC%d: client does not have an iommu domain\n",
			dev->rc_idx);
		return 0;
	}

	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);

	PCIE_DBG(dev,
		"PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n",
		dev->rc_idx, bypass_en ? "bypass" : "enabled",
		pdev->bus->number, pdev->devfn);

	/* stage-1 bypass: the physical doorbell address works as-is */
	if (bypass_en)
		return 0;

	iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap_en);
	if (fastmap_en) {
		iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geometry);
		iova = geometry.aperture_start;
		PCIE_DBG(dev,
			"PCIe: RC%d: Use client's IOVA 0x%llx to map QGIC MSI address\n",
			dev->rc_idx, iova);
	} else {
		phys_addr_t pcie_base_addr;

		/*
		 * Use PCIe DBI address as the IOVA since client cannot
		 * use this address for their IOMMU mapping. This will
		 * prevent any conflicts between PCIe host and
		 * client's mapping.
		 */
		pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
		iova = rounddown(pcie_base_addr, PAGE_SIZE);
	}

	addr = dma_map_resource(&pdev->dev, dev->msi_gicm_addr, PAGE_SIZE,
				DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(&pdev->dev, addr)) {
		PCIE_ERR(dev, "PCIe: RC%d: failed to map QGIC address",
			dev->rc_idx);
		return -EIO;
	}

	/* final doorbell address the endpoint will write to */
	msg->address_lo = iova + addr;

	return 0;
}
5164
/*
 * Set up nvec QGIC-backed MSI vectors for an endpoint.
 *
 * Allocates nvec contiguous-by-slot vectors, configures them as rising
 * edge, attaches the msi_desc to the first irq, maps the QGIC doorbell
 * address, and programs the MSI message whose data encodes the first
 * vector's offset into the GIC MSI range.
 *
 * NOTE(review): if allocation or mapping fails partway through, irqs
 * claimed by earlier iterations are not released here — verify callers
 * tear down via arch_teardown_msi_irqs() on failure.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq, index, ret, firstirq = 0;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	for (index = 0; index < nvec; index++) {
		irq = msm_pcie_create_irq_qgic(dev);
		PCIE_DBG(dev, "irq %d is allocated\n", irq);

		if (irq < 0)
			return irq;

		if (index == 0)
			firstirq = irq;

		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
	}

	/* write msi vector and data */
	irq_set_msi_desc(firstirq, desc);

	ret = msm_pcie_map_qgic_addr(dev, pdev, &msg);
	if (ret)
		return ret;

	msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
	write_msi_msg(firstirq, &msg);

	return 0;
}
5199
5200int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
5201{
5202 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5203
5204 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5205
5206 if (dev->msi_gicm_addr)
5207 return arch_setup_msi_irq_qgic(pdev, desc, 1);
5208 else
5209 return arch_setup_msi_irq_default(pdev, desc, 1);
5210}
5211
/*
 * Compute floor(log2(nvec)) — the value stored in msi_attrib.multiple.
 * Returns -1 when nvec is 0 (loop never runs), matching the original
 * "count shifts then subtract one" formulation.
 */
static int msm_pcie_get_msi_multiple(int nvec)
{
	int log2_nvec = -1;

	for (; nvec; nvec >>= 1)
		log2_nvec++;

	PCIE_GEN_DBG("log2 number of MSI multiple:%d\n",
		log2_nvec);

	return log2_nvec;
}
5225
/*
 * Multi-vector MSI setup hook for the PCI core.
 *
 * Only plain MSI (not MSI-X) with at most 32 vectors is supported.
 * For each descriptor the log2 vector count is recorded and the
 * vectors are allocated via the QGIC or default scheme. On full
 * success use_msi is set on the RC.
 *
 * Return: 0 on success, -ENOSPC for unsupported type/count or a
 * positive sub-setup result, negative errno on allocation failure.
 */
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);

	if (type != PCI_CAP_ID_MSI || nvec > 32)
		return -ENOSPC;

	PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);

	list_for_each_entry(entry, &dev->dev.msi_list, list) {
		entry->msi_attrib.multiple =
			msm_pcie_get_msi_multiple(nvec);

		if (pcie_dev->msi_gicm_addr)
			ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
		else
			ret = arch_setup_msi_irq_default(dev, entry, nvec);

		PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);

		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	pcie_dev->use_msi = true;

	return 0;
}
5260
5261static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
5262 irq_hw_number_t hwirq)
5263{
5264 irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
Tony Truong349ee492014-10-01 17:35:56 -07005265 return 0;
5266}
5267
/* Ops for the virtual MSI irq domain; only .map is needed. */
static const struct irq_domain_ops msm_pcie_msi_ops = {
	.map = msm_pcie_msi_map,
};
5271
Stephen Boydb5b8fc32017-06-21 08:59:11 -07005272static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
Tony Truong349ee492014-10-01 17:35:56 -07005273{
5274 int rc;
5275 int msi_start = 0;
5276 struct device *pdev = &dev->pdev->dev;
5277
5278 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5279
5280 if (dev->rc_idx)
5281 wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
5282 else
5283 wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
5284
5285 /* register handler for linkdown interrupt */
5286 if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
5287 rc = devm_request_irq(pdev,
5288 dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
5289 handle_linkdown_irq,
5290 IRQF_TRIGGER_RISING,
5291 dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
5292 dev);
5293 if (rc) {
5294 PCIE_ERR(dev,
5295 "PCIe: Unable to request linkdown interrupt:%d\n",
5296 dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
5297 return rc;
5298 }
5299 }
5300
5301 /* register handler for physical MSI interrupt line */
5302 if (dev->irq[MSM_PCIE_INT_MSI].num) {
5303 rc = devm_request_irq(pdev,
5304 dev->irq[MSM_PCIE_INT_MSI].num,
5305 handle_msi_irq,
5306 IRQF_TRIGGER_RISING,
5307 dev->irq[MSM_PCIE_INT_MSI].name,
5308 dev);
5309 if (rc) {
5310 PCIE_ERR(dev,
5311 "PCIe: RC%d: Unable to request MSI interrupt\n",
5312 dev->rc_idx);
5313 return rc;
5314 }
5315 }
5316
5317 /* register handler for AER interrupt */
5318 if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
5319 rc = devm_request_irq(pdev,
5320 dev->irq[MSM_PCIE_INT_PLS_ERR].num,
5321 handle_aer_irq,
5322 IRQF_TRIGGER_RISING,
5323 dev->irq[MSM_PCIE_INT_PLS_ERR].name,
5324 dev);
5325 if (rc) {
5326 PCIE_ERR(dev,
5327 "PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
5328 dev->rc_idx,
5329 dev->irq[MSM_PCIE_INT_PLS_ERR].num);
5330 return rc;
5331 }
5332 }
5333
5334 /* register handler for AER legacy interrupt */
5335 if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
5336 rc = devm_request_irq(pdev,
5337 dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
5338 handle_aer_irq,
5339 IRQF_TRIGGER_RISING,
5340 dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
5341 dev);
5342 if (rc) {
5343 PCIE_ERR(dev,
5344 "PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
5345 dev->rc_idx,
5346 dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
5347 return rc;
5348 }
5349 }
5350
5351 if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
5352 rc = devm_request_irq(pdev,
5353 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
5354 handle_global_irq,
5355 IRQF_TRIGGER_RISING,
5356 dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
5357 dev);
5358 if (rc) {
5359 PCIE_ERR(dev,
5360 "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
5361 dev->rc_idx,
5362 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
5363 return rc;
5364 }
5365 }
5366
5367 /* register handler for PCIE_WAKE_N interrupt line */
5368 if (dev->wake_n) {
5369 rc = devm_request_irq(pdev,
5370 dev->wake_n, handle_wake_irq,
5371 IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
5372 if (rc) {
5373 PCIE_ERR(dev,
5374 "PCIe: RC%d: Unable to request wake interrupt\n",
5375 dev->rc_idx);
5376 return rc;
5377 }
5378
5379 INIT_WORK(&dev->handle_wake_work, handle_wake_func);
5380
5381 rc = enable_irq_wake(dev->wake_n);
5382 if (rc) {
5383 PCIE_ERR(dev,
5384 "PCIe: RC%d: Unable to enable wake interrupt\n",
5385 dev->rc_idx);
5386 return rc;
5387 }
5388 }
5389
5390 /* Create a virtual domain of interrupts */
5391 if (!dev->msi_gicm_addr) {
5392 dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
5393 PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
5394
5395 if (!dev->irq_domain) {
5396 PCIE_ERR(dev,
5397 "PCIe: RC%d: Unable to initialize irq domain\n",
5398 dev->rc_idx);
5399
5400 if (dev->wake_n)
5401 disable_irq(dev->wake_n);
5402
5403 return PTR_ERR(dev->irq_domain);
5404 }
5405
5406 msi_start = irq_create_mapping(dev->irq_domain, 0);
5407 }
5408
5409 return 0;
5410}
5411
/*
 * Undo msm_pcie_irq_init(): drop the wakeup source and quiesce the
 * wake gpio interrupt. The devm-requested irqs are released by the
 * driver core automatically.
 */
static void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	wakeup_source_trash(&dev->ws);

	if (dev->wake_n)
		disable_irq(dev->wake_n);
}
5421
/*
 * Enable or disable ASPM L0s on one device's link control register,
 * but only when the Link Capabilities register advertises L0s support
 * (BIT(10) of LNKCAP is the L0s bit of the ASPM Support field).
 */
static void msm_pcie_config_l0s(struct msm_pcie_dev_t *dev,
				struct pci_dev *pdev, bool enable)
{
	u32 val;
	u32 lnkcap_offset = pdev->pcie_cap + PCI_EXP_LNKCAP;
	u32 lnkctl_offset = pdev->pcie_cap + PCI_EXP_LNKCTL;

	pci_read_config_dword(pdev, lnkcap_offset, &val);
	if (!(val & BIT(10))) {
		PCIE_DBG(dev,
			"PCIe: RC%d: PCI device does not support L0s\n",
			dev->rc_idx);
		return;
	}

	if (enable)
		msm_pcie_config_clear_set_dword(pdev, lnkctl_offset, 0,
			PCI_EXP_LNKCTL_ASPM_L0S);
	else
		msm_pcie_config_clear_set_dword(pdev, lnkctl_offset,
			PCI_EXP_LNKCTL_ASPM_L0S, 0);

	pci_read_config_dword(pdev, lnkctl_offset, &val);
	PCIE_DBG2(dev, "PCIe: RC%d: LINKCTRLSTATUS:0x%x\n", dev->rc_idx, val);
}
5447
/*
 * Enable or disable ASPM L1 on one device's link control register,
 * but only when the Link Capabilities register advertises L1 support
 * (BIT(11) of LNKCAP is the L1 bit of the ASPM Support field).
 */
static void msm_pcie_config_l1(struct msm_pcie_dev_t *dev,
				struct pci_dev *pdev, bool enable)
{
	u32 val;
	u32 lnkcap_offset = pdev->pcie_cap + PCI_EXP_LNKCAP;
	u32 lnkctl_offset = pdev->pcie_cap + PCI_EXP_LNKCTL;

	pci_read_config_dword(pdev, lnkcap_offset, &val);
	if (!(val & BIT(11))) {
		PCIE_DBG(dev,
			"PCIe: RC%d: PCI device does not support L1\n",
			dev->rc_idx);
		return;
	}

	if (enable)
		msm_pcie_config_clear_set_dword(pdev, lnkctl_offset, 0,
			PCI_EXP_LNKCTL_ASPM_L1);
	else
		msm_pcie_config_clear_set_dword(pdev, lnkctl_offset,
			PCI_EXP_LNKCTL_ASPM_L1, 0);

	pci_read_config_dword(pdev, lnkctl_offset, &val);
	PCIE_DBG2(dev, "PCIe: RC%d: LINKCTRLSTATUS:0x%x\n", dev->rc_idx, val);
}
5473
/*
 * Enable or disable L1 substates (PCI-PM L1.1/L1.2 and ASPM L1.1/L1.2)
 * on one device, guided by what its L1SS extended capability reports.
 *
 * Only the substates the device actually advertises are toggled, along
 * with LTR (required for L1.2 entry). On the root port, the PARF
 * SYS_CTRL AUX-clock-sync bit is also adjusted unless aux_clk_sync is
 * already configured.
 */
static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev,
				struct pci_dev *pdev, bool enable)
{
	bool l1_1_pcipm_support, l1_2_pcipm_support;
	bool l1_1_aspm_support, l1_2_aspm_support;
	u32 val, val2;
	u32 l1ss_cap_id_offset, l1ss_cap_offset, l1ss_ctl1_offset;
	u32 devctl2_offset = pdev->pcie_cap + PCI_EXP_DEVCTL2;

	l1ss_cap_id_offset = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!l1ss_cap_id_offset) {
		PCIE_DBG(dev,
			"PCIe: RC%d could not find L1ss capability register for device\n",
			dev->rc_idx);
		return;
	}

	l1ss_cap_offset = l1ss_cap_id_offset + PCI_L1SS_CAP;
	l1ss_ctl1_offset = l1ss_cap_id_offset + PCI_L1SS_CTL1;

	/* read which of the four substates this device supports */
	pci_read_config_dword(pdev, l1ss_cap_offset, &val);
	l1_1_pcipm_support = !!(val & (PCI_L1SS_CAP_PCIPM_L1_1));
	l1_2_pcipm_support = !!(val & (PCI_L1SS_CAP_PCIPM_L1_2));
	l1_1_aspm_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_1));
	l1_2_aspm_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_2));
	if (!l1_1_pcipm_support && !l1_2_pcipm_support &&
		!l1_1_aspm_support && !l1_2_aspm_support) {
		PCIE_DBG(dev,
			"PCIe: RC%d: PCI device does not support any L1ss\n",
			dev->rc_idx);
		return;
	}

	/* Enable the AUX Clock and the Core Clk to be synchronous for L1ss */
	if (pci_is_root_bus(pdev->bus) && !dev->aux_clk_sync) {
		if (enable)
			msm_pcie_write_mask(dev->parf +
				PCIE20_PARF_SYS_CTRL, BIT(3), 0);
		else
			msm_pcie_write_mask(dev->parf +
				PCIE20_PARF_SYS_CTRL, 0, BIT(3));
	}

	if (enable) {
		/* LTR must be on for L1.2 entry conditions */
		msm_pcie_config_clear_set_dword(pdev, devctl2_offset, 0,
			PCI_EXP_DEVCTL2_LTR_EN);
		msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset, 0,
			(l1_1_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_1 : 0) |
			(l1_2_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_2 : 0) |
			(l1_1_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
			(l1_2_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0));
	} else {
		msm_pcie_config_clear_set_dword(pdev, devctl2_offset,
			PCI_EXP_DEVCTL2_LTR_EN, 0);
		msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset,
			(l1_1_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_1 : 0) |
			(l1_2_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_2 : 0) |
			(l1_1_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
			(l1_2_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0), 0);
	}

	pci_read_config_dword(pdev, l1ss_ctl1_offset, &val);
	PCIE_DBG2(dev, "PCIe: RC%d: L1SUB_CONTROL1:0x%x\n", dev->rc_idx, val);

	pci_read_config_dword(pdev, devctl2_offset, &val2);
	PCIE_DBG2(dev, "PCIe: RC%d: DEVICE_CONTROL2_STATUS2::0x%x\n",
		dev->rc_idx, val2);
}
5542
/*
 * Enable CLKREQ#-based clock power management on a downstream device
 * when its Link Capabilities advertise it. Skipped for the root port
 * itself.
 */
static void msm_pcie_config_clock_power_management(struct msm_pcie_dev_t *dev,
				struct pci_dev *pdev)
{
	u32 val;
	u32 lnkcap_offset = pdev->pcie_cap + PCI_EXP_LNKCAP;
	u32 lnkctl_offset = pdev->pcie_cap + PCI_EXP_LNKCTL;

	if (pci_is_root_bus(pdev->bus))
		return;

	pci_read_config_dword(pdev, lnkcap_offset, &val);
	if (val & PCI_EXP_LNKCAP_CLKPM)
		msm_pcie_config_clear_set_dword(pdev, lnkctl_offset, 0,
			PCI_EXP_LNKCTL_CLKREQ_EN);
	else
		PCIE_DBG(dev,
			"PCIe: RC%d: PCI device does not support clock power management\n",
			dev->rc_idx);
}
5562
/*
 * Apply all configured link power-management features to one device:
 * common clock configuration, CLKREQ clock PM, and (per DT flags)
 * L0s, L1 substates, and L1. L1ss is configured before L1 so the
 * substates are in place when L1 is enabled.
 */
static void msm_pcie_config_link_pm(struct msm_pcie_dev_t *dev,
				struct pci_dev *pdev, bool enable)
{
	if (dev->common_clk_en)
		msm_pcie_config_clear_set_dword(pdev,
			pdev->pcie_cap + PCI_EXP_LNKCTL, 0,
			PCI_EXP_LNKCTL_CCC);

	if (dev->clk_power_manage_en)
		msm_pcie_config_clock_power_management(dev, pdev);
	if (dev->l0s_supported)
		msm_pcie_config_l0s(dev, pdev, enable);
	if (dev->l1ss_supported)
		msm_pcie_config_l1ss(dev, pdev, enable);
	if (dev->l1_supported)
		msm_pcie_config_l1(dev, pdev, enable);
}
5580
5581static void msm_pcie_config_link_pm_rc(struct msm_pcie_dev_t *dev,
5582 struct pci_dev *pdev, bool enable)
5583{
5584 bool child_l0s_enable = 0, child_l1_enable = 0, child_l1ss_enable = 0;
5585
5586 if (!pdev->subordinate || !(&pdev->subordinate->devices)) {
5587 PCIE_DBG(dev,
5588 "PCIe: RC%d: no device connected to root complex\n",
5589 dev->rc_idx);
5590 return;
5591 }
5592
5593 if (dev->l0s_supported) {
5594 struct pci_dev *child_pdev, *c_pdev;
5595
5596 list_for_each_entry_safe(child_pdev, c_pdev,
5597 &pdev->subordinate->devices, bus_list) {
5598 u32 val;
5599
5600 pci_read_config_dword(child_pdev,
Tony Truonge023d012017-11-10 13:36:26 -08005601 child_pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
Tony Truongb1af8b62017-05-31 15:40:38 -07005602 child_l0s_enable = !!(val & PCI_EXP_LNKCTL_ASPM_L0S);
5603 if (child_l0s_enable)
5604 break;
5605 }
5606
5607 if (child_l0s_enable)
5608 msm_pcie_config_l0s(dev, pdev, enable);
5609 else
5610 dev->l0s_supported = false;
5611 }
5612
5613 if (dev->l1ss_supported) {
5614 struct pci_dev *child_pdev, *c_pdev;
5615
5616 list_for_each_entry_safe(child_pdev, c_pdev,
5617 &pdev->subordinate->devices, bus_list) {
5618 u32 val;
5619 u32 l1ss_cap_id_offset =
5620 pci_find_ext_capability(child_pdev,
5621 PCI_EXT_CAP_ID_L1SS);
5622
5623 if (!l1ss_cap_id_offset)
5624 continue;
5625
5626 pci_read_config_dword(child_pdev,
5627 l1ss_cap_id_offset + PCI_L1SS_CTL1, &val);
5628 child_l1ss_enable = !!(val &
Tony Truonge26150f2017-12-18 15:38:51 -08005629 (PCI_L1SS_CTL1_PCIPM_L1_1 |
5630 PCI_L1SS_CTL1_PCIPM_L1_2 |
5631 PCI_L1SS_CTL1_ASPM_L1_1 |
Tony Truongb1af8b62017-05-31 15:40:38 -07005632 PCI_L1SS_CTL1_ASPM_L1_2));
5633 if (child_l1ss_enable)
5634 break;
5635 }
5636
5637 if (child_l1ss_enable)
5638 msm_pcie_config_l1ss(dev, pdev, enable);
5639 else
5640 dev->l1ss_supported = false;
5641 }
5642
5643 if (dev->l1_supported) {
5644 struct pci_dev *child_pdev, *c_pdev;
5645
5646 list_for_each_entry_safe(child_pdev, c_pdev,
5647 &pdev->subordinate->devices, bus_list) {
5648 u32 val;
5649
5650 pci_read_config_dword(child_pdev,
Tony Truonge023d012017-11-10 13:36:26 -08005651 child_pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
Tony Truongb1af8b62017-05-31 15:40:38 -07005652 child_l1_enable = !!(val & PCI_EXP_LNKCTL_ASPM_L1);
5653 if (child_l1_enable)
5654 break;
5655 }
5656
5657 if (child_l1_enable)
5658 msm_pcie_config_l1(dev, pdev, enable);
5659 else
5660 dev->l1_supported = false;
5661 }
5662}
5663
/*
 * Per-device configuration callback invoked during bus scan: program
 * the device's SMMU stream ID and, for non-root devices, enable link
 * power management. Always returns 0 (pdev carries the RC context).
 */
static int msm_pcie_config_device(struct pci_dev *dev, void *pdev)
{
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)pdev;
	u8 busnr = dev->bus->number;
	u8 slot = PCI_SLOT(dev->devfn);
	u8 func = PCI_FUNC(dev->devfn);

	PCIE_DBG(pcie_dev, "PCIe: RC%d: configure PCI device %02x:%02x.%01x\n",
		pcie_dev->rc_idx, busnr, slot, func);

	msm_pcie_configure_sid(pcie_dev, dev);

	/* the root port's own link PM is handled separately */
	if (!pci_is_root_bus(dev->bus))
		msm_pcie_config_link_pm(pcie_dev, dev, true);

	return 0;
}
5681
/* Hook to setup PCI device during PCI framework scan */
int pcibios_add_device(struct pci_dev *dev)
{
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	return msm_pcie_config_device(dev, pcie_dev);
}
Tony Truong349ee492014-10-01 17:35:56 -07005689
5690static int msm_pcie_probe(struct platform_device *pdev)
5691{
5692 int ret = 0;
5693 int rc_idx = -1;
5694 int i, j;
5695
5696 PCIE_GEN_DBG("%s\n", __func__);
5697
5698 mutex_lock(&pcie_drv.drv_lock);
5699
5700 ret = of_property_read_u32((&pdev->dev)->of_node,
5701 "cell-index", &rc_idx);
5702 if (ret) {
5703 PCIE_GEN_DBG("Did not find RC index.\n");
5704 goto out;
5705 } else {
5706 if (rc_idx >= MAX_RC_NUM) {
5707 pr_err(
5708 "PCIe: Invalid RC Index %d (max supported = %d)\n",
5709 rc_idx, MAX_RC_NUM);
5710 goto out;
5711 }
5712 pcie_drv.rc_num++;
5713 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
5714 rc_idx);
5715 }
5716
5717 msm_pcie_dev[rc_idx].l0s_supported =
5718 of_property_read_bool((&pdev->dev)->of_node,
5719 "qcom,l0s-supported");
Tony Truong7416d722017-09-12 16:45:18 -07005720 if (msm_pcie_invert_l0s_support & BIT(rc_idx))
5721 msm_pcie_dev[rc_idx].l0s_supported =
5722 !msm_pcie_dev[rc_idx].l0s_supported;
Tony Truong349ee492014-10-01 17:35:56 -07005723 PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
5724 msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
5725 msm_pcie_dev[rc_idx].l1_supported =
5726 of_property_read_bool((&pdev->dev)->of_node,
5727 "qcom,l1-supported");
Tony Truong7416d722017-09-12 16:45:18 -07005728 if (msm_pcie_invert_l1_support & BIT(rc_idx))
5729 msm_pcie_dev[rc_idx].l1_supported =
5730 !msm_pcie_dev[rc_idx].l1_supported;
Tony Truong349ee492014-10-01 17:35:56 -07005731 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
5732 msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
5733 msm_pcie_dev[rc_idx].l1ss_supported =
5734 of_property_read_bool((&pdev->dev)->of_node,
5735 "qcom,l1ss-supported");
Tony Truong7416d722017-09-12 16:45:18 -07005736 if (msm_pcie_invert_l1ss_support & BIT(rc_idx))
5737 msm_pcie_dev[rc_idx].l1ss_supported =
5738 !msm_pcie_dev[rc_idx].l1ss_supported;
Tony Truong349ee492014-10-01 17:35:56 -07005739 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
5740 msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
5741 msm_pcie_dev[rc_idx].common_clk_en =
5742 of_property_read_bool((&pdev->dev)->of_node,
5743 "qcom,common-clk-en");
5744 PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
5745 msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
5746 msm_pcie_dev[rc_idx].clk_power_manage_en =
5747 of_property_read_bool((&pdev->dev)->of_node,
5748 "qcom,clk-power-manage-en");
5749 PCIE_DBG(&msm_pcie_dev[rc_idx],
5750 "Clock power management is %s enabled.\n",
5751 msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
5752 msm_pcie_dev[rc_idx].aux_clk_sync =
5753 of_property_read_bool((&pdev->dev)->of_node,
5754 "qcom,aux-clk-sync");
5755 PCIE_DBG(&msm_pcie_dev[rc_idx],
5756 "AUX clock is %s synchronous to Core clock.\n",
5757 msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
5758
5759 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
5760 of_property_read_bool((&pdev->dev)->of_node,
5761 "qcom,use-19p2mhz-aux-clk");
5762 PCIE_DBG(&msm_pcie_dev[rc_idx],
5763 "AUX clock frequency is %s 19.2MHz.\n",
5764 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
5765
5766 msm_pcie_dev[rc_idx].smmu_exist =
5767 of_property_read_bool((&pdev->dev)->of_node,
5768 "qcom,smmu-exist");
5769 PCIE_DBG(&msm_pcie_dev[rc_idx],
5770 "SMMU does %s exist.\n",
5771 msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
5772
5773 msm_pcie_dev[rc_idx].smmu_sid_base = 0;
5774 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
5775 &msm_pcie_dev[rc_idx].smmu_sid_base);
5776 if (ret)
5777 PCIE_DBG(&msm_pcie_dev[rc_idx],
5778 "RC%d SMMU sid base not found\n",
5779 msm_pcie_dev[rc_idx].rc_idx);
5780 else
5781 PCIE_DBG(&msm_pcie_dev[rc_idx],
5782 "RC%d: qcom,smmu-sid-base: 0x%x.\n",
5783 msm_pcie_dev[rc_idx].rc_idx,
5784 msm_pcie_dev[rc_idx].smmu_sid_base);
5785
Tony Truong9f2c7722017-02-28 15:02:27 -08005786 msm_pcie_dev[rc_idx].boot_option = 0;
5787 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,boot-option",
5788 &msm_pcie_dev[rc_idx].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07005789 PCIE_DBG(&msm_pcie_dev[rc_idx],
Tony Truong9f2c7722017-02-28 15:02:27 -08005790 "PCIe: RC%d boot option is 0x%x.\n",
5791 rc_idx, msm_pcie_dev[rc_idx].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07005792
5793 msm_pcie_dev[rc_idx].phy_ver = 1;
5794 ret = of_property_read_u32((&pdev->dev)->of_node,
5795 "qcom,pcie-phy-ver",
5796 &msm_pcie_dev[rc_idx].phy_ver);
5797 if (ret)
5798 PCIE_DBG(&msm_pcie_dev[rc_idx],
5799 "RC%d: pcie-phy-ver does not exist.\n",
5800 msm_pcie_dev[rc_idx].rc_idx);
5801 else
5802 PCIE_DBG(&msm_pcie_dev[rc_idx],
5803 "RC%d: pcie-phy-ver: %d.\n",
5804 msm_pcie_dev[rc_idx].rc_idx,
5805 msm_pcie_dev[rc_idx].phy_ver);
5806
5807 msm_pcie_dev[rc_idx].n_fts = 0;
5808 ret = of_property_read_u32((&pdev->dev)->of_node,
5809 "qcom,n-fts",
5810 &msm_pcie_dev[rc_idx].n_fts);
5811
5812 if (ret)
5813 PCIE_DBG(&msm_pcie_dev[rc_idx],
5814 "n-fts does not exist. ret=%d\n", ret);
5815 else
5816 PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
5817 msm_pcie_dev[rc_idx].n_fts);
5818
Tony Truong24e02ba2017-08-30 14:53:14 -07005819 msm_pcie_dev[rc_idx].max_link_speed = GEN2_SPEED;
5820 ret = of_property_read_u32(pdev->dev.of_node,
5821 "qcom,max-link-speed",
5822 &msm_pcie_dev[rc_idx].max_link_speed);
5823 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC%d: max-link-speed: 0x%x.\n",
5824 rc_idx, msm_pcie_dev[rc_idx].max_link_speed);
5825
Tony Truong349ee492014-10-01 17:35:56 -07005826 msm_pcie_dev[rc_idx].ext_ref_clk =
5827 of_property_read_bool((&pdev->dev)->of_node,
5828 "qcom,ext-ref-clk");
5829 PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
5830 msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
5831
5832 msm_pcie_dev[rc_idx].ep_latency = 0;
5833 ret = of_property_read_u32((&pdev->dev)->of_node,
5834 "qcom,ep-latency",
5835 &msm_pcie_dev[rc_idx].ep_latency);
5836 if (ret)
5837 PCIE_DBG(&msm_pcie_dev[rc_idx],
5838 "RC%d: ep-latency does not exist.\n",
5839 rc_idx);
5840 else
5841 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
5842 rc_idx, msm_pcie_dev[rc_idx].ep_latency);
5843
Rama Krishna Phani A72fcfb22017-06-30 15:45:06 +05305844 msm_pcie_dev[rc_idx].switch_latency = 0;
5845 ret = of_property_read_u32((&pdev->dev)->of_node,
5846 "qcom,switch-latency",
5847 &msm_pcie_dev[rc_idx].switch_latency);
5848
5849 if (ret)
5850 PCIE_DBG(&msm_pcie_dev[rc_idx],
5851 "RC%d: switch-latency does not exist.\n",
5852 rc_idx);
5853 else
5854 PCIE_DBG(&msm_pcie_dev[rc_idx],
5855 "RC%d: switch-latency: 0x%x.\n",
5856 rc_idx, msm_pcie_dev[rc_idx].switch_latency);
5857
Tony Truong349ee492014-10-01 17:35:56 -07005858 msm_pcie_dev[rc_idx].wr_halt_size = 0;
5859 ret = of_property_read_u32(pdev->dev.of_node,
5860 "qcom,wr-halt-size",
5861 &msm_pcie_dev[rc_idx].wr_halt_size);
5862 if (ret)
5863 PCIE_DBG(&msm_pcie_dev[rc_idx],
5864 "RC%d: wr-halt-size not specified in dt. Use default value.\n",
5865 rc_idx);
5866 else
5867 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
5868 rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
5869
Tony Truong41e63ec2017-08-30 12:08:12 -07005870 msm_pcie_dev[rc_idx].slv_addr_space_size = SZ_16M;
5871 ret = of_property_read_u32(pdev->dev.of_node,
5872 "qcom,slv-addr-space-size",
5873 &msm_pcie_dev[rc_idx].slv_addr_space_size);
5874 PCIE_DBG(&msm_pcie_dev[rc_idx],
5875 "RC%d: slv-addr-space-size: 0x%x.\n",
5876 rc_idx, msm_pcie_dev[rc_idx].slv_addr_space_size);
5877
Tony Truong349ee492014-10-01 17:35:56 -07005878 msm_pcie_dev[rc_idx].cpl_timeout = 0;
5879 ret = of_property_read_u32((&pdev->dev)->of_node,
5880 "qcom,cpl-timeout",
5881 &msm_pcie_dev[rc_idx].cpl_timeout);
5882 if (ret)
5883 PCIE_DBG(&msm_pcie_dev[rc_idx],
5884 "RC%d: Using default cpl-timeout.\n",
5885 rc_idx);
5886 else
5887 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
5888 rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
5889
5890 msm_pcie_dev[rc_idx].perst_delay_us_min =
5891 PERST_PROPAGATION_DELAY_US_MIN;
5892 ret = of_property_read_u32(pdev->dev.of_node,
5893 "qcom,perst-delay-us-min",
5894 &msm_pcie_dev[rc_idx].perst_delay_us_min);
5895 if (ret)
5896 PCIE_DBG(&msm_pcie_dev[rc_idx],
5897 "RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
5898 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
5899 else
5900 PCIE_DBG(&msm_pcie_dev[rc_idx],
5901 "RC%d: perst-delay-us-min: %dus.\n",
5902 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
5903
5904 msm_pcie_dev[rc_idx].perst_delay_us_max =
5905 PERST_PROPAGATION_DELAY_US_MAX;
5906 ret = of_property_read_u32(pdev->dev.of_node,
5907 "qcom,perst-delay-us-max",
5908 &msm_pcie_dev[rc_idx].perst_delay_us_max);
5909 if (ret)
5910 PCIE_DBG(&msm_pcie_dev[rc_idx],
5911 "RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
5912 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
5913 else
5914 PCIE_DBG(&msm_pcie_dev[rc_idx],
5915 "RC%d: perst-delay-us-max: %dus.\n",
5916 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
5917
5918 msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
5919 ret = of_property_read_u32(pdev->dev.of_node,
5920 "qcom,tlp-rd-size",
5921 &msm_pcie_dev[rc_idx].tlp_rd_size);
5922 if (ret)
5923 PCIE_DBG(&msm_pcie_dev[rc_idx],
5924 "RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
5925 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
5926 else
5927 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
5928 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
5929
5930 msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
5931 msm_pcie_dev[rc_idx].msi_gicm_base = 0;
5932 ret = of_property_read_u32((&pdev->dev)->of_node,
5933 "qcom,msi-gicm-addr",
5934 &msm_pcie_dev[rc_idx].msi_gicm_addr);
5935
5936 if (ret) {
5937 PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
5938 "msi-gicm-addr does not exist.\n");
5939 } else {
5940 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
5941 msm_pcie_dev[rc_idx].msi_gicm_addr);
5942
5943 ret = of_property_read_u32((&pdev->dev)->of_node,
5944 "qcom,msi-gicm-base",
5945 &msm_pcie_dev[rc_idx].msi_gicm_base);
5946
5947 if (ret) {
5948 PCIE_ERR(&msm_pcie_dev[rc_idx],
5949 "PCIe: RC%d: msi-gicm-base does not exist.\n",
5950 rc_idx);
5951 goto decrease_rc_num;
5952 } else {
5953 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
5954 msm_pcie_dev[rc_idx].msi_gicm_base);
5955 }
5956 }
5957
5958 msm_pcie_dev[rc_idx].scm_dev_id = 0;
5959 ret = of_property_read_u32((&pdev->dev)->of_node,
5960 "qcom,scm-dev-id",
5961 &msm_pcie_dev[rc_idx].scm_dev_id);
5962
5963 msm_pcie_dev[rc_idx].rc_idx = rc_idx;
5964 msm_pcie_dev[rc_idx].pdev = pdev;
5965 msm_pcie_dev[rc_idx].vreg_n = 0;
5966 msm_pcie_dev[rc_idx].gpio_n = 0;
5967 msm_pcie_dev[rc_idx].parf_deemph = 0;
5968 msm_pcie_dev[rc_idx].parf_swing = 0;
5969 msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
5970 msm_pcie_dev[rc_idx].user_suspend = false;
5971 msm_pcie_dev[rc_idx].disable_pc = false;
5972 msm_pcie_dev[rc_idx].saved_state = NULL;
5973 msm_pcie_dev[rc_idx].enumerated = false;
5974 msm_pcie_dev[rc_idx].num_active_ep = 0;
5975 msm_pcie_dev[rc_idx].num_ep = 0;
5976 msm_pcie_dev[rc_idx].pending_ep_reg = false;
5977 msm_pcie_dev[rc_idx].phy_len = 0;
Tony Truong349ee492014-10-01 17:35:56 -07005978 msm_pcie_dev[rc_idx].phy_sequence = NULL;
Tony Truong349ee492014-10-01 17:35:56 -07005979 msm_pcie_dev[rc_idx].event_reg = NULL;
5980 msm_pcie_dev[rc_idx].linkdown_counter = 0;
5981 msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
5982 msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
5983 msm_pcie_dev[rc_idx].rc_corr_counter = 0;
5984 msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
5985 msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
5986 msm_pcie_dev[rc_idx].ep_corr_counter = 0;
5987 msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
5988 msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
5989 msm_pcie_dev[rc_idx].suspending = false;
5990 msm_pcie_dev[rc_idx].wake_counter = 0;
5991 msm_pcie_dev[rc_idx].aer_enable = true;
Tony Truong7416d722017-09-12 16:45:18 -07005992 if (msm_pcie_invert_aer_support)
5993 msm_pcie_dev[rc_idx].aer_enable = false;
Tony Truong349ee492014-10-01 17:35:56 -07005994 msm_pcie_dev[rc_idx].power_on = false;
Tony Truong349ee492014-10-01 17:35:56 -07005995 msm_pcie_dev[rc_idx].use_msi = false;
5996 msm_pcie_dev[rc_idx].use_pinctrl = false;
5997 msm_pcie_dev[rc_idx].linkdown_panic = false;
5998 msm_pcie_dev[rc_idx].bridge_found = false;
5999 memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
6000 sizeof(msm_pcie_vreg_info));
6001 memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
6002 sizeof(msm_pcie_gpio_info));
6003 memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
6004 sizeof(msm_pcie_clk_info[rc_idx]));
6005 memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
6006 sizeof(msm_pcie_pipe_clk_info[rc_idx]));
6007 memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
6008 sizeof(msm_pcie_res_info));
6009 memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
6010 sizeof(msm_pcie_irq_info));
6011 memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
6012 sizeof(msm_pcie_msi_info));
6013 memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
6014 sizeof(msm_pcie_reset_info[rc_idx]));
6015 memcpy(msm_pcie_dev[rc_idx].pipe_reset,
6016 msm_pcie_pipe_reset_info[rc_idx],
6017 sizeof(msm_pcie_pipe_reset_info[rc_idx]));
6018 msm_pcie_dev[rc_idx].shadow_en = true;
6019 for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
6020 msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
6021 for (i = 0; i < MAX_DEVICE_NUM; i++)
6022 for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
6023 msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
6024 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6025 msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
6026 msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
6027 msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
6028 msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
6029 msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006030 msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = NULL;
Tony Truong349ee492014-10-01 17:35:56 -07006031 msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
6032 msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
6033 msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
6034 msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
6035 }
6036
Tony Truongbd9a3412017-02-27 18:30:13 -08006037 dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
Tony Truongbd9a3412017-02-27 18:30:13 -08006038
Tony Truong349ee492014-10-01 17:35:56 -07006039 ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
6040 msm_pcie_dev[rc_idx].pdev);
6041
6042 if (ret)
6043 goto decrease_rc_num;
6044
6045 msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
6046 if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
6047 PCIE_ERR(&msm_pcie_dev[rc_idx],
6048 "PCIe: RC%d failed to get pinctrl\n",
6049 rc_idx);
6050 else
6051 msm_pcie_dev[rc_idx].use_pinctrl = true;
6052
6053 if (msm_pcie_dev[rc_idx].use_pinctrl) {
6054 msm_pcie_dev[rc_idx].pins_default =
6055 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6056 "default");
6057 if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
6058 PCIE_ERR(&msm_pcie_dev[rc_idx],
6059 "PCIe: RC%d could not get pinctrl default state\n",
6060 rc_idx);
6061 msm_pcie_dev[rc_idx].pins_default = NULL;
6062 }
6063
6064 msm_pcie_dev[rc_idx].pins_sleep =
6065 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6066 "sleep");
6067 if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
6068 PCIE_ERR(&msm_pcie_dev[rc_idx],
6069 "PCIe: RC%d could not get pinctrl sleep state\n",
6070 rc_idx);
6071 msm_pcie_dev[rc_idx].pins_sleep = NULL;
6072 }
6073 }
6074
6075 ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
6076 if (ret) {
6077 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6078 goto decrease_rc_num;
6079 }
6080
6081 ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
6082 if (ret) {
6083 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6084 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6085 goto decrease_rc_num;
6086 }
6087
Tony Truong14a5ddf2017-04-20 11:04:03 -07006088 msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
6089
Tony Truong349ee492014-10-01 17:35:56 -07006090 msm_pcie_dev[rc_idx].drv_ready = true;
6091
Tony Truong9f2c7722017-02-28 15:02:27 -08006092 if (msm_pcie_dev[rc_idx].boot_option &
6093 MSM_PCIE_NO_PROBE_ENUMERATION) {
Tony Truong349ee492014-10-01 17:35:56 -07006094 PCIE_DBG(&msm_pcie_dev[rc_idx],
Tony Truong9f2c7722017-02-28 15:02:27 -08006095 "PCIe: RC%d will be enumerated by client or endpoint.\n",
Tony Truong349ee492014-10-01 17:35:56 -07006096 rc_idx);
6097 mutex_unlock(&pcie_drv.drv_lock);
6098 return 0;
6099 }
6100
6101 ret = msm_pcie_enumerate(rc_idx);
6102
6103 if (ret)
6104 PCIE_ERR(&msm_pcie_dev[rc_idx],
6105 "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
6106 rc_idx);
6107 else
6108 PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n",
6109 rc_idx);
6110
6111 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n",
6112 dev_name(&(pdev->dev)));
6113
6114 mutex_unlock(&pcie_drv.drv_lock);
6115 return 0;
6116
6117decrease_rc_num:
6118 pcie_drv.rc_num--;
6119out:
6120 if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
6121 pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
6122 rc_idx);
6123 else
6124 PCIE_ERR(&msm_pcie_dev[rc_idx],
6125 "PCIe: Driver probe failed for RC%d:%d\n",
6126 rc_idx, ret);
6127
6128 mutex_unlock(&pcie_drv.drv_lock);
6129
6130 return ret;
6131}
6132
6133static int msm_pcie_remove(struct platform_device *pdev)
6134{
6135 int ret = 0;
6136 int rc_idx;
6137
6138 PCIE_GEN_DBG("PCIe:%s.\n", __func__);
6139
6140 mutex_lock(&pcie_drv.drv_lock);
6141
6142 ret = of_property_read_u32((&pdev->dev)->of_node,
6143 "cell-index", &rc_idx);
6144 if (ret) {
6145 pr_err("%s: Did not find RC index.\n", __func__);
6146 goto out;
6147 } else {
6148 pcie_drv.rc_num--;
6149 PCIE_GEN_DBG("%s: RC index is 0x%x.", __func__, rc_idx);
6150 }
6151
6152 msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
6153 msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
6154 msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
6155 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6156 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6157
6158out:
6159 mutex_unlock(&pcie_drv.drv_lock);
6160
6161 return ret;
6162}
6163
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id msm_pcie_match[] = {
	{	.compatible = "qcom,pci-msm",
	},
	{}
};
6169
/* Platform driver glue; matched against DT nodes via msm_pcie_match. */
static struct platform_driver msm_pcie_driver = {
	.probe	= msm_pcie_probe,
	.remove	= msm_pcie_remove,
	.driver	= {
		.name		= "pci-msm",
		.owner		= THIS_MODULE,
		.of_match_table	= msm_pcie_match,
	},
};
6179
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006180static int __init pcie_init(void)
Tony Truong349ee492014-10-01 17:35:56 -07006181{
6182 int ret = 0, i;
6183 char rc_name[MAX_RC_NAME_LEN];
6184
6185 pr_alert("pcie:%s.\n", __func__);
6186
6187 pcie_drv.rc_num = 0;
6188 mutex_init(&pcie_drv.drv_lock);
Tony Truong349ee492014-10-01 17:35:56 -07006189
6190 for (i = 0; i < MAX_RC_NUM; i++) {
6191 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
6192 msm_pcie_dev[i].ipc_log =
6193 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6194 if (msm_pcie_dev[i].ipc_log == NULL)
6195 pr_err("%s: unable to create IPC log context for %s\n",
6196 __func__, rc_name);
6197 else
6198 PCIE_DBG(&msm_pcie_dev[i],
6199 "PCIe IPC logging is enable for RC%d\n",
6200 i);
6201 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
6202 msm_pcie_dev[i].ipc_log_long =
6203 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6204 if (msm_pcie_dev[i].ipc_log_long == NULL)
6205 pr_err("%s: unable to create IPC log context for %s\n",
6206 __func__, rc_name);
6207 else
6208 PCIE_DBG(&msm_pcie_dev[i],
6209 "PCIe IPC logging %s is enable for RC%d\n",
6210 rc_name, i);
6211 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
6212 msm_pcie_dev[i].ipc_log_dump =
6213 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6214 if (msm_pcie_dev[i].ipc_log_dump == NULL)
6215 pr_err("%s: unable to create IPC log context for %s\n",
6216 __func__, rc_name);
6217 else
6218 PCIE_DBG(&msm_pcie_dev[i],
6219 "PCIe IPC logging %s is enable for RC%d\n",
6220 rc_name, i);
6221 spin_lock_init(&msm_pcie_dev[i].cfg_lock);
6222 msm_pcie_dev[i].cfg_access = true;
6223 mutex_init(&msm_pcie_dev[i].enumerate_lock);
6224 mutex_init(&msm_pcie_dev[i].setup_lock);
6225 mutex_init(&msm_pcie_dev[i].recovery_lock);
6226 spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
6227 spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
6228 spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
6229 spin_lock_init(&msm_pcie_dev[i].aer_lock);
6230 msm_pcie_dev[i].drv_ready = false;
6231 }
6232 for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
6233 msm_pcie_dev_tbl[i].bdf = 0;
6234 msm_pcie_dev_tbl[i].dev = NULL;
6235 msm_pcie_dev_tbl[i].short_bdf = 0;
6236 msm_pcie_dev_tbl[i].sid = 0;
6237 msm_pcie_dev_tbl[i].domain = -1;
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006238 msm_pcie_dev_tbl[i].conf_base = NULL;
Tony Truong349ee492014-10-01 17:35:56 -07006239 msm_pcie_dev_tbl[i].phy_address = 0;
6240 msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
6241 msm_pcie_dev_tbl[i].event_reg = NULL;
6242 msm_pcie_dev_tbl[i].registered = true;
6243 }
6244
6245 msm_pcie_debugfs_init();
6246
6247 ret = platform_driver_register(&msm_pcie_driver);
6248
6249 return ret;
6250}
6251
6252static void __exit pcie_exit(void)
6253{
Tony Truongbd9a3412017-02-27 18:30:13 -08006254 int i;
6255
Tony Truong349ee492014-10-01 17:35:56 -07006256 PCIE_GEN_DBG("pcie:%s.\n", __func__);
6257
6258 platform_driver_unregister(&msm_pcie_driver);
6259
6260 msm_pcie_debugfs_exit();
Tony Truongbd9a3412017-02-27 18:30:13 -08006261
6262 for (i = 0; i < MAX_RC_NUM; i++)
6263 msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
Tony Truong349ee492014-10-01 17:35:56 -07006264}
6265
/* Module entry/exit registration. */
subsys_initcall_sync(pcie_init);
module_exit(pcie_exit);
6268
6269
6270/* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
6271static void msm_pcie_fixup_early(struct pci_dev *dev)
6272{
6273 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6274
6275 PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
6276 if (dev->hdr_type == 1)
6277 dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
6278}
6279DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6280 msm_pcie_fixup_early);
6281
/* Suspend the PCIe link */
/*
 * Saves the endpoint's config space (unless MSM_PCIE_CONFIG_NO_CFG_RESTORE
 * is set), blocks further config access, sends PME_Turn_Off, waits for
 * L23_Ready, then powers down clocks and regulators.
 *
 * Returns 0 on success or the pci_save_state() error. Note that a
 * PM_Enter_L23 timeout is only logged, not treated as a failure.
 */
static int msm_pcie_pm_suspend(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret = 0;
	u32 val = 0;
	int ret_l23;
	unsigned long irqsave_flags;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* Mark the RC as suspending under aer_lock so the AER path sees it. */
	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
	pcie_dev->suspending = true;
	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);

	/* Nothing to do if the RC is already powered off. */
	if (!pcie_dev->power_on) {
		PCIE_DBG(pcie_dev,
			"PCIe: power of RC%d has been turned off.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	/* Save EP config space only when the link is confirmed up and the
	 * caller did not opt out of config restore.
	 */
	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
			pcie_dev->conf)) {
		ret = pci_save_state(dev);
		pcie_dev->saved_state =	pci_store_saved_state(dev);
	}
	if (ret) {
		PCIE_ERR(pcie_dev, "PCIe: fail to save state of RC%d:%d.\n",
			pcie_dev->rc_idx, ret);
		pcie_dev->suspending = false;
		return ret;
	}

	/* Block config-space access for the duration of the suspend. */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = false;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* Set BIT(4) in ELBI_SYS_CTRL to send PME_TURNOFF_MSG (see log). */
	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
				BIT(4));

	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
		pcie_dev->rc_idx);

	/* Poll PARF_PM_STTS BIT(5) (L23_Ready) up to 100ms, 10ms steps. */
	ret_l23 = readl_poll_timeout((pcie_dev->parf
		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);

	/* check L23_Ready */
	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
		pcie_dev->rc_idx,
		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
	if (!ret_l23)
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
			pcie_dev->rc_idx);

	/* Move pins to their sleep configuration, if one was provided. */
	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_sleep);

	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6354
6355static void msm_pcie_fixup_suspend(struct pci_dev *dev)
6356{
6357 int ret;
6358 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6359
6360 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6361
6362 if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
6363 return;
6364
6365 spin_lock_irqsave(&pcie_dev->cfg_lock,
6366 pcie_dev->irqsave_flags);
6367 if (pcie_dev->disable_pc) {
6368 PCIE_DBG(pcie_dev,
6369 "RC%d: Skip suspend because of user request\n",
6370 pcie_dev->rc_idx);
6371 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
6372 pcie_dev->irqsave_flags);
6373 return;
6374 }
6375 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
6376 pcie_dev->irqsave_flags);
6377
6378 mutex_lock(&pcie_dev->recovery_lock);
6379
6380 ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
6381 if (ret)
6382 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
6383 pcie_dev->rc_idx, ret);
6384
6385 mutex_unlock(&pcie_dev->recovery_lock);
6386}
6387DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6388 msm_pcie_fixup_suspend);
6389
/* Resume the PCIe link */
/*
 * Re-enables pins, config access, clocks and regulators, then restores
 * the endpoint's saved config space (unless MSM_PCIE_CONFIG_NO_CFG_RESTORE
 * was given) and, when a bridge was found during enumeration, recovers
 * the shadowed config space as well.
 *
 * Returns 0 on success or the msm_pcie_enable() error.
 */
static int msm_pcie_pm_resume(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* Restore the default pin configuration before powering up. */
	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_default);

	/* Re-allow config-space access (blocked during suspend). */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = true;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
	if (ret) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d fail to enable PCIe link in resume.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	pcie_dev->suspending = false;
	PCIE_DBG(pcie_dev,
		"dev->bus->number = %d dev->bus->primary = %d\n",
		 dev->bus->number, dev->bus->primary);

	/* Restore the state saved in msm_pcie_pm_suspend(). */
	if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCI framework restore state\n",
			pcie_dev->rc_idx);

		pci_load_and_free_saved_state(dev,
				&pcie_dev->saved_state);
		pci_restore_state(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCI framework restore state\n",
			pcie_dev->rc_idx);
	}

	/* NOTE(review): recover_config's failure is deliberately ignored
	 * here — resume still reports success; confirm this is intended.
	 */
	if (pcie_dev->bridge_found) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCIe recover config\n",
			pcie_dev->rc_idx);

		msm_pcie_recover_config(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCIe recover config\n",
			pcie_dev->rc_idx);
	}

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6452
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006453static void msm_pcie_fixup_resume(struct pci_dev *dev)
Tony Truong349ee492014-10-01 17:35:56 -07006454{
6455 int ret;
6456 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6457
6458 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6459
6460 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6461 pcie_dev->user_suspend)
6462 return;
6463
6464 mutex_lock(&pcie_dev->recovery_lock);
6465 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6466 if (ret)
6467 PCIE_ERR(pcie_dev,
6468 "PCIe: RC%d got failure in fixup resume:%d.\n",
6469 pcie_dev->rc_idx, ret);
6470
6471 mutex_unlock(&pcie_dev->recovery_lock);
6472}
6473DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6474 msm_pcie_fixup_resume);
6475
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006476static void msm_pcie_fixup_resume_early(struct pci_dev *dev)
Tony Truong349ee492014-10-01 17:35:56 -07006477{
6478 int ret;
6479 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6480
6481 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6482
6483 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6484 pcie_dev->user_suspend)
6485 return;
6486
6487 mutex_lock(&pcie_dev->recovery_lock);
6488 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6489 if (ret)
6490 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
6491 pcie_dev->rc_idx, ret);
6492
6493 mutex_unlock(&pcie_dev->recovery_lock);
6494}
6495DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6496 msm_pcie_fixup_resume_early);
6497
/**
 * msm_pcie_pm_control() - client entry point for link power management.
 * @pm_opt: requested operation (suspend/resume/disable-pc/enable-pc).
 * @busnr: expected bus number of @user; 0 skips the bus-number check.
 * @user: pci_dev of the requesting endpoint; must not be NULL.
 * @data: opaque pointer forwarded to the suspend/resume handlers.
 * @options: MSM_PCIE_CONFIG_* flags forwarded to the handlers.
 *
 * Return: 0 on success; -ENODEV, -EPROBE_DEFER or MSM_PCIE_ERROR on
 * failure.
 */
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
			void *data, u32 options)
{
	int i, ret = 0;
	struct pci_dev *dev;
	u32 rc_idx = 0;
	struct msm_pcie_dev_t *pcie_dev;

	PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
		pm_opt, busnr, options);


	if (!user) {
		pr_err("PCIe: endpoint device is NULL\n");
		ret = -ENODEV;
		goto out;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);

	if (pcie_dev) {
		rc_idx = pcie_dev->rc_idx;
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
			rc_idx, pm_opt, busnr, options);
	} else {
		pr_err(
			"PCIe: did not find RC for pci endpoint device.\n"
			);
		ret = -ENODEV;
		goto out;
	}

	/* When busnr is given, verify it matches the table entry for @user. */
	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (!busnr)
			break;
		if (user == pcie_dev->pcidev_table[i].dev) {
			/* bdf's top byte is the bus number. */
			if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
				break;

			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: bus number %d does not match with the expected value %d\n",
				pcie_dev->rc_idx, busnr,
				pcie_dev->pcidev_table[i].bdf >> 24);
			ret = MSM_PCIE_ERROR;
			goto out;
		}
	}

	if (i == MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: endpoint device was not found in device table",
			pcie_dev->rc_idx);
		ret = MSM_PCIE_ERROR;
		goto out;
	}

	dev = msm_pcie_dev[rc_idx].dev;

	if (!msm_pcie_dev[rc_idx].drv_ready) {
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"RC%d has not been successfully probed yet\n",
			rc_idx);
		return -EPROBE_DEFER;
	}

	switch (pm_opt) {
	case MSM_PCIE_SUSPEND:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to suspend the link\n", rc_idx);
		/* Suspending with the link not enabled is only logged. */
		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);

		if (!msm_pcie_dev[rc_idx].power_on) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);
			break;
		}

		if (msm_pcie_dev[rc_idx].pending_ep_reg) {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: request to suspend the link is rejected\n",
				rc_idx);
			break;
		}

		/* Defer suspend while other EPs on this RC are active. */
		if (pcie_dev->num_active_ep) {
			PCIE_DBG(pcie_dev,
				"RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
				pcie_dev->rc_idx, pcie_dev->num_active_ep);
			return ret;
		}

		msm_pcie_dev[rc_idx].user_suspend = true;

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);

		ret = msm_pcie_pm_suspend(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to suspend the link.\n",
				rc_idx);
			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
		break;
	case MSM_PCIE_RESUME:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to resume the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status !=
					MSM_PCIE_LINK_DISABLED) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status,
				msm_pcie_dev[rc_idx].num_active_ep);
			break;
		}

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
		ret = msm_pcie_pm_resume(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to resume the link.\n",
				rc_idx);
		} else {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user succeeded to resume the link.\n",
				rc_idx);

			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);

		break;
	case MSM_PCIE_DISABLE_PC:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to keep the link always alive.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		/* Too late to disable power collapse mid-suspend. */
		if (msm_pcie_dev[rc_idx].suspending) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d Link has been suspended before request\n",
				rc_idx);
			ret = MSM_PCIE_ERROR;
		} else {
			msm_pcie_dev[rc_idx].disable_pc = true;
		}
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	case MSM_PCIE_ENABLE_PC:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d cancels the request of alive link.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		msm_pcie_dev[rc_idx].disable_pc = false;
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	default:
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"PCIe: RC%d: unsupported pm operation:%d.\n",
			rc_idx, pm_opt);
		ret = -ENODEV;
		goto out;
	}

out:
	return ret;
}
EXPORT_SYMBOL(msm_pcie_pm_control);
6676
6677int msm_pcie_register_event(struct msm_pcie_register_event *reg)
6678{
6679 int i, ret = 0;
6680 struct msm_pcie_dev_t *pcie_dev;
6681
6682 if (!reg) {
6683 pr_err("PCIe: Event registration is NULL\n");
6684 return -ENODEV;
6685 }
6686
6687 if (!reg->user) {
6688 pr_err("PCIe: User of event registration is NULL\n");
6689 return -ENODEV;
6690 }
6691
6692 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6693
6694 if (!pcie_dev) {
6695 PCIE_ERR(pcie_dev, "%s",
6696 "PCIe: did not find RC for pci endpoint device.\n");
6697 return -ENODEV;
6698 }
6699
6700 if (pcie_dev->num_ep > 1) {
6701 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6702 if (reg->user ==
6703 pcie_dev->pcidev_table[i].dev) {
6704 pcie_dev->event_reg =
6705 pcie_dev->pcidev_table[i].event_reg;
6706
6707 if (!pcie_dev->event_reg) {
6708 pcie_dev->pcidev_table[i].registered =
6709 true;
6710
6711 pcie_dev->num_active_ep++;
6712 PCIE_DBG(pcie_dev,
6713 "PCIe: RC%d: number of active EP(s): %d.\n",
6714 pcie_dev->rc_idx,
6715 pcie_dev->num_active_ep);
6716 }
6717
6718 pcie_dev->event_reg = reg;
6719 pcie_dev->pcidev_table[i].event_reg = reg;
6720 PCIE_DBG(pcie_dev,
6721 "Event 0x%x is registered for RC %d\n",
6722 reg->events,
6723 pcie_dev->rc_idx);
6724
6725 break;
6726 }
6727 }
6728
6729 if (pcie_dev->pending_ep_reg) {
6730 for (i = 0; i < MAX_DEVICE_NUM; i++)
6731 if (!pcie_dev->pcidev_table[i].registered)
6732 break;
6733
6734 if (i == MAX_DEVICE_NUM)
6735 pcie_dev->pending_ep_reg = false;
6736 }
6737 } else {
6738 pcie_dev->event_reg = reg;
6739 PCIE_DBG(pcie_dev,
6740 "Event 0x%x is registered for RC %d\n", reg->events,
6741 pcie_dev->rc_idx);
6742 }
6743
6744 return ret;
6745}
6746EXPORT_SYMBOL(msm_pcie_register_event);
6747
6748int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
6749{
6750 int i, ret = 0;
6751 struct msm_pcie_dev_t *pcie_dev;
6752
6753 if (!reg) {
6754 pr_err("PCIe: Event deregistration is NULL\n");
6755 return -ENODEV;
6756 }
6757
6758 if (!reg->user) {
6759 pr_err("PCIe: User of event deregistration is NULL\n");
6760 return -ENODEV;
6761 }
6762
6763 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6764
6765 if (!pcie_dev) {
6766 PCIE_ERR(pcie_dev, "%s",
6767 "PCIe: did not find RC for pci endpoint device.\n");
6768 return -ENODEV;
6769 }
6770
6771 if (pcie_dev->num_ep > 1) {
6772 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6773 if (reg->user == pcie_dev->pcidev_table[i].dev) {
6774 if (pcie_dev->pcidev_table[i].event_reg) {
6775 pcie_dev->num_active_ep--;
6776 PCIE_DBG(pcie_dev,
6777 "PCIe: RC%d: number of active EP(s) left: %d.\n",
6778 pcie_dev->rc_idx,
6779 pcie_dev->num_active_ep);
6780 }
6781
6782 pcie_dev->event_reg = NULL;
6783 pcie_dev->pcidev_table[i].event_reg = NULL;
6784 PCIE_DBG(pcie_dev,
6785 "Event is deregistered for RC %d\n",
6786 pcie_dev->rc_idx);
6787
6788 break;
6789 }
6790 }
6791 } else {
6792 pcie_dev->event_reg = NULL;
6793 PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
6794 pcie_dev->rc_idx);
6795 }
6796
6797 return ret;
6798}
6799EXPORT_SYMBOL(msm_pcie_deregister_event);
6800
6801int msm_pcie_recover_config(struct pci_dev *dev)
6802{
6803 int ret = 0;
6804 struct msm_pcie_dev_t *pcie_dev;
6805
6806 if (dev) {
6807 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6808 PCIE_DBG(pcie_dev,
6809 "Recovery for the link of RC%d\n", pcie_dev->rc_idx);
6810 } else {
6811 pr_err("PCIe: the input pci dev is NULL.\n");
6812 return -ENODEV;
6813 }
6814
6815 if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
6816 PCIE_DBG(pcie_dev,
6817 "Recover config space of RC%d and its EP\n",
6818 pcie_dev->rc_idx);
6819 pcie_dev->shadow_en = false;
6820 PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
6821 msm_pcie_cfg_recover(pcie_dev, true);
6822 PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
6823 msm_pcie_cfg_recover(pcie_dev, false);
6824 PCIE_DBG(pcie_dev,
6825 "Refreshing the saved config space in PCI framework for RC%d and its EP\n",
6826 pcie_dev->rc_idx);
6827 pci_save_state(pcie_dev->dev);
6828 pci_save_state(dev);
6829 pcie_dev->shadow_en = true;
6830 PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
6831 pcie_dev->rc_idx);
6832 } else {
6833 PCIE_ERR(pcie_dev,
6834 "PCIe: the link of RC%d is not up yet; can't recover config space.\n",
6835 pcie_dev->rc_idx);
6836 ret = -ENODEV;
6837 }
6838
6839 return ret;
6840}
6841EXPORT_SYMBOL(msm_pcie_recover_config);
6842
6843int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
6844{
6845 int ret = 0;
6846 struct msm_pcie_dev_t *pcie_dev;
6847
6848 if (dev) {
6849 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6850 PCIE_DBG(pcie_dev,
6851 "User requests to %s shadow\n",
6852 enable ? "enable" : "disable");
6853 } else {
6854 pr_err("PCIe: the input pci dev is NULL.\n");
6855 return -ENODEV;
6856 }
6857
6858 PCIE_DBG(pcie_dev,
6859 "The shadowing of RC%d is %s enabled currently.\n",
6860 pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not");
6861
6862 pcie_dev->shadow_en = enable;
6863
6864 PCIE_DBG(pcie_dev,
6865 "Shadowing of RC%d is turned %s upon user's request.\n",
6866 pcie_dev->rc_idx, enable ? "on" : "off");
6867
6868 return ret;
6869}
6870EXPORT_SYMBOL(msm_pcie_shadow_control);