blob: 20d48a0b5e07738402d7e358f37e23bb5ce8d452 [file] [log] [blame]
Tony Truonge023d012017-11-10 13:36:26 -08001/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
Tony Truong349ee492014-10-01 17:35:56 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * MSM PCIe controller driver.
15 */
16
17#include <linux/module.h>
18#include <linux/bitops.h>
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
Tony Truong74ee0fd2017-10-06 19:37:43 -070022#include <linux/jiffies.h>
Tony Truong349ee492014-10-01 17:35:56 -070023#include <linux/gpio.h>
24#include <linux/iopoll.h>
25#include <linux/kernel.h>
26#include <linux/of_pci.h>
27#include <linux/pci.h>
Tony Truong52122a62017-03-23 18:00:34 -070028#include <linux/iommu.h>
Tony Truong349ee492014-10-01 17:35:56 -070029#include <linux/platform_device.h>
30#include <linux/regulator/consumer.h>
Tony Truongb213ac12017-04-05 15:21:20 -070031#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
Tony Truong349ee492014-10-01 17:35:56 -070032#include <linux/slab.h>
33#include <linux/types.h>
34#include <linux/of_gpio.h>
Tony Truongb213ac12017-04-05 15:21:20 -070035#include <linux/clk/qcom.h>
Tony Truong349ee492014-10-01 17:35:56 -070036#include <linux/reset.h>
37#include <linux/msm-bus.h>
38#include <linux/msm-bus-board.h>
Tony Truongbad3b742017-11-22 14:40:19 -080039#include <linux/seq_file.h>
Tony Truong349ee492014-10-01 17:35:56 -070040#include <linux/debugfs.h>
41#include <linux/uaccess.h>
42#include <linux/io.h>
43#include <linux/msi.h>
44#include <linux/interrupt.h>
45#include <linux/irq.h>
46#include <linux/irqdomain.h>
47#include <linux/pm_wakeup.h>
48#include <linux/compiler.h>
49#include <soc/qcom/scm.h>
50#include <linux/ipc_logging.h>
51#include <linux/msm_pcie.h>
52
Tony Truong8ff900c2017-12-20 11:21:03 -080053#define PCIE_VENDOR_ID_QCOM 0x17cb
Tony Truongb213ac12017-04-05 15:21:20 -070054
55#define PCIE20_L1SUB_CONTROL1 0x1E4
56#define PCIE20_PARF_DBI_BASE_ADDR 0x350
57#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
58
Tony Truongb213ac12017-04-05 15:21:20 -070059#define PCS_BASE 0x800
Tony Truongb213ac12017-04-05 15:21:20 -070060
Tony Truong232cf4d2017-08-22 18:28:24 -070061#define PCS_PORT(n) (PCS_BASE + n * 0x1000)
Tony Truong349ee492014-10-01 17:35:56 -070062
Tony Truong232cf4d2017-08-22 18:28:24 -070063#define PCIE_N_SW_RESET(n) (PCS_PORT(n) + 0x00)
64#define PCIE_N_POWER_DOWN_CONTROL(n) (PCS_PORT(n) + 0x04)
Tony Truong349ee492014-10-01 17:35:56 -070065
Tony Truongc275fe02017-04-18 19:04:20 -070066#define PCIE_GEN3_COM_INTEGLOOP_GAIN1_MODE0 0x0154
67#define PCIE_GEN3_L0_DRVR_CTRL0 0x080c
68#define PCIE_GEN3_L0_RESET_GEN 0x0890
69#define PCIE_GEN3_L0_BIST_ERR_CNT1_STATUS 0x08a8
70#define PCIE_GEN3_L0_BIST_ERR_CNT2_STATUS 0x08ac
71#define PCIE_GEN3_L0_DEBUG_BUS_STATUS4 0x08bc
Tony Truongc275fe02017-04-18 19:04:20 -070072
Tony Truong349ee492014-10-01 17:35:56 -070073#define PCIE20_PARF_SYS_CTRL 0x00
Tony Truongb213ac12017-04-05 15:21:20 -070074#define PCIE20_PARF_PM_CTRL 0x20
Tony Truong349ee492014-10-01 17:35:56 -070075#define PCIE20_PARF_PM_STTS 0x24
76#define PCIE20_PARF_PCS_DEEMPH 0x34
77#define PCIE20_PARF_PCS_SWING 0x38
78#define PCIE20_PARF_PHY_CTRL 0x40
79#define PCIE20_PARF_PHY_REFCLK 0x4C
80#define PCIE20_PARF_CONFIG_BITS 0x50
81#define PCIE20_PARF_TEST_BUS 0xE4
82#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
83#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8
84#define PCIE20_PARF_LTSSM 0x1B0
85#define PCIE20_PARF_INT_ALL_STATUS 0x224
86#define PCIE20_PARF_INT_ALL_CLEAR 0x228
87#define PCIE20_PARF_INT_ALL_MASK 0x22C
88#define PCIE20_PARF_SID_OFFSET 0x234
89#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
90#define PCIE20_PARF_BDF_TRANSLATE_N 0x250
Tony Truongb213ac12017-04-05 15:21:20 -070091#define PCIE20_PARF_DEVICE_TYPE 0x1000
Tony Truong349ee492014-10-01 17:35:56 -070092
93#define PCIE20_ELBI_VERSION 0x00
94#define PCIE20_ELBI_SYS_CTRL 0x04
95#define PCIE20_ELBI_SYS_STTS 0x08
96
97#define PCIE20_CAP 0x70
98#define PCIE20_CAP_DEVCTRLSTATUS (PCIE20_CAP + 0x08)
99#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
100
101#define PCIE20_COMMAND_STATUS 0x04
102#define PCIE20_HEADER_TYPE 0x0C
103#define PCIE20_BUSNUMBERS 0x18
104#define PCIE20_MEMORY_BASE_LIMIT 0x20
105#define PCIE20_BRIDGE_CTRL 0x3C
106#define PCIE20_DEVICE_CONTROL_STATUS 0x78
107#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
108
109#define PCIE20_AUX_CLK_FREQ_REG 0xB40
110#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C
111#define PCIE20_ACK_N_FTS 0xff00
112
113#define PCIE20_PLR_IATU_VIEWPORT 0x900
114#define PCIE20_PLR_IATU_CTRL1 0x904
115#define PCIE20_PLR_IATU_CTRL2 0x908
116#define PCIE20_PLR_IATU_LBAR 0x90C
117#define PCIE20_PLR_IATU_UBAR 0x910
118#define PCIE20_PLR_IATU_LAR 0x914
119#define PCIE20_PLR_IATU_LTAR 0x918
120#define PCIE20_PLR_IATU_UTAR 0x91c
121
Tony Truongf49801f2017-10-25 11:22:35 -0700122#define PCIE_IATU_BASE(n) (n * 0x200)
123
124#define PCIE_IATU_CTRL1(n) (PCIE_IATU_BASE(n) + 0x00)
125#define PCIE_IATU_CTRL2(n) (PCIE_IATU_BASE(n) + 0x04)
126#define PCIE_IATU_LBAR(n) (PCIE_IATU_BASE(n) + 0x08)
127#define PCIE_IATU_UBAR(n) (PCIE_IATU_BASE(n) + 0x0c)
128#define PCIE_IATU_LAR(n) (PCIE_IATU_BASE(n) + 0x10)
129#define PCIE_IATU_LTAR(n) (PCIE_IATU_BASE(n) + 0x14)
130#define PCIE_IATU_UTAR(n) (PCIE_IATU_BASE(n) + 0x18)
Tony Truong06ff2ed2017-01-15 19:28:13 -0800131
132#define PCIE20_PORT_LINK_CTRL_REG 0x710
133#define PCIE20_GEN3_RELATED_REG 0x890
134#define PCIE20_PIPE_LOOPBACK_CONTROL 0x8b8
135#define LOOPBACK_BASE_ADDR_OFFSET 0x8000
136
Tony Truong349ee492014-10-01 17:35:56 -0700137#define PCIE20_CTRL1_TYPE_CFG0 0x04
138#define PCIE20_CTRL1_TYPE_CFG1 0x05
139
140#define PCIE20_CAP_ID 0x10
141#define L1SUB_CAP_ID 0x1E
142
143#define PCIE_CAP_PTR_OFFSET 0x34
144#define PCIE_EXT_CAP_OFFSET 0x100
145
146#define PCIE20_AER_UNCORR_ERR_STATUS_REG 0x104
147#define PCIE20_AER_CORR_ERR_STATUS_REG 0x110
148#define PCIE20_AER_ROOT_ERR_STATUS_REG 0x130
149#define PCIE20_AER_ERR_SRC_ID_REG 0x134
150
151#define RD 0
152#define WR 1
153#define MSM_PCIE_ERROR -1
154
155#define PERST_PROPAGATION_DELAY_US_MIN 1000
156#define PERST_PROPAGATION_DELAY_US_MAX 1005
Rama Krishna Phani A72fcfb22017-06-30 15:45:06 +0530157#define SWITCH_DELAY_MAX 20
Tony Truong349ee492014-10-01 17:35:56 -0700158#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
159#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
160#define LINK_UP_TIMEOUT_US_MIN 5000
161#define LINK_UP_TIMEOUT_US_MAX 5100
162#define LINK_UP_CHECK_MAX_COUNT 20
Tony Truong74ee0fd2017-10-06 19:37:43 -0700163#define EP_UP_TIMEOUT_US_MIN 1000
164#define EP_UP_TIMEOUT_US_MAX 1005
165#define EP_UP_TIMEOUT_US 1000000
Tony Truong349ee492014-10-01 17:35:56 -0700166#define PHY_STABILIZATION_DELAY_US_MIN 995
167#define PHY_STABILIZATION_DELAY_US_MAX 1005
168#define POWER_DOWN_DELAY_US_MIN 10
169#define POWER_DOWN_DELAY_US_MAX 11
170#define LINKDOWN_INIT_WAITING_US_MIN 995
171#define LINKDOWN_INIT_WAITING_US_MAX 1005
172#define LINKDOWN_WAITING_US_MIN 4900
173#define LINKDOWN_WAITING_US_MAX 5100
174#define LINKDOWN_WAITING_COUNT 200
175
Tony Truong24e02ba2017-08-30 14:53:14 -0700176#define GEN1_SPEED 0x1
177#define GEN2_SPEED 0x2
178#define GEN3_SPEED 0x3
179
Tony Truong349ee492014-10-01 17:35:56 -0700180#define PHY_READY_TIMEOUT_COUNT 10
181#define XMLH_LINK_UP 0x400
182#define MAX_LINK_RETRIES 5
183#define MAX_BUS_NUM 3
184#define MAX_PROP_SIZE 32
185#define MAX_RC_NAME_LEN 15
186#define MSM_PCIE_MAX_VREG 4
Tony Truong4c1b3be2017-12-12 11:06:18 -0800187#define MSM_PCIE_MAX_CLK 13
Tony Truong349ee492014-10-01 17:35:56 -0700188#define MSM_PCIE_MAX_PIPE_CLK 1
189#define MAX_RC_NUM 3
190#define MAX_DEVICE_NUM 20
191#define MAX_SHORT_BDF_NUM 16
192#define PCIE_TLP_RD_SIZE 0x5
193#define PCIE_MSI_NR_IRQS 256
194#define MSM_PCIE_MAX_MSI 32
Tony Truong349ee492014-10-01 17:35:56 -0700195#define PCIE_LOG_PAGES (50)
196#define PCIE_CONF_SPACE_DW 1024
197#define PCIE_CLEAR 0xDEADBEEF
198#define PCIE_LINK_DOWN 0xFFFFFFFF
199
Tony Truongb213ac12017-04-05 15:21:20 -0700200#define MSM_PCIE_MAX_RESET 5
Tony Truong349ee492014-10-01 17:35:56 -0700201#define MSM_PCIE_MAX_PIPE_RESET 1
202
203#define MSM_PCIE_MSI_PHY 0xa0000000
204#define PCIE20_MSI_CTRL_ADDR (0x820)
205#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
206#define PCIE20_MSI_CTRL_INTR_EN (0x828)
207#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
208#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
209#define PCIE20_MSI_CTRL_MAX 8
210
211/* PM control options */
212#define PM_IRQ 0x1
213#define PM_CLK 0x2
214#define PM_GPIO 0x4
215#define PM_VREG 0x8
216#define PM_PIPE_CLK 0x10
217#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)
218
219#ifdef CONFIG_PHYS_ADDR_T_64BIT
220#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
221#else
222#define PCIE_UPPER_ADDR(addr) (0x0)
223#endif
224#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
225
Tony Truong09223e42017-11-08 16:50:20 -0800226#define PCIE_BUS_PRIV_DATA(bus) \
227 (struct msm_pcie_dev_t *)(bus->sysdata)
228
Tony Truong349ee492014-10-01 17:35:56 -0700229/* Config Space Offsets */
230#define BDF_OFFSET(bus, devfn) \
231 ((bus << 24) | (devfn << 16))
232
233#define PCIE_GEN_DBG(x...) do { \
234 if (msm_pcie_debug_mask) \
235 pr_alert(x); \
236 } while (0)
237
238#define PCIE_DBG(dev, fmt, arg...) do { \
239 if ((dev) && (dev)->ipc_log_long) \
240 ipc_log_string((dev)->ipc_log_long, \
241 "DBG1:%s: " fmt, __func__, arg); \
242 if ((dev) && (dev)->ipc_log) \
243 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
244 if (msm_pcie_debug_mask) \
245 pr_alert("%s: " fmt, __func__, arg); \
246 } while (0)
247
248#define PCIE_DBG2(dev, fmt, arg...) do { \
249 if ((dev) && (dev)->ipc_log) \
250 ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
251 if (msm_pcie_debug_mask) \
252 pr_alert("%s: " fmt, __func__, arg); \
253 } while (0)
254
255#define PCIE_DBG3(dev, fmt, arg...) do { \
256 if ((dev) && (dev)->ipc_log) \
257 ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
258 if (msm_pcie_debug_mask) \
259 pr_alert("%s: " fmt, __func__, arg); \
260 } while (0)
261
262#define PCIE_DUMP(dev, fmt, arg...) do { \
263 if ((dev) && (dev)->ipc_log_dump) \
264 ipc_log_string((dev)->ipc_log_dump, \
265 "DUMP:%s: " fmt, __func__, arg); \
266 } while (0)
267
268#define PCIE_DBG_FS(dev, fmt, arg...) do { \
269 if ((dev) && (dev)->ipc_log_dump) \
270 ipc_log_string((dev)->ipc_log_dump, \
271 "DBG_FS:%s: " fmt, __func__, arg); \
272 pr_alert("%s: " fmt, __func__, arg); \
273 } while (0)
274
275#define PCIE_INFO(dev, fmt, arg...) do { \
276 if ((dev) && (dev)->ipc_log_long) \
277 ipc_log_string((dev)->ipc_log_long, \
278 "INFO:%s: " fmt, __func__, arg); \
279 if ((dev) && (dev)->ipc_log) \
280 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
281 pr_info("%s: " fmt, __func__, arg); \
282 } while (0)
283
284#define PCIE_ERR(dev, fmt, arg...) do { \
285 if ((dev) && (dev)->ipc_log_long) \
286 ipc_log_string((dev)->ipc_log_long, \
287 "ERR:%s: " fmt, __func__, arg); \
288 if ((dev) && (dev)->ipc_log) \
289 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
290 pr_err("%s: " fmt, __func__, arg); \
291 } while (0)
292
293
294enum msm_pcie_res {
295 MSM_PCIE_RES_PARF,
296 MSM_PCIE_RES_PHY,
297 MSM_PCIE_RES_DM_CORE,
298 MSM_PCIE_RES_ELBI,
Tony Truongf49801f2017-10-25 11:22:35 -0700299 MSM_PCIE_RES_IATU,
Tony Truong349ee492014-10-01 17:35:56 -0700300 MSM_PCIE_RES_CONF,
301 MSM_PCIE_RES_IO,
302 MSM_PCIE_RES_BARS,
303 MSM_PCIE_RES_TCSR,
304 MSM_PCIE_MAX_RES,
305};
306
307enum msm_pcie_irq {
308 MSM_PCIE_INT_MSI,
309 MSM_PCIE_INT_A,
310 MSM_PCIE_INT_B,
311 MSM_PCIE_INT_C,
312 MSM_PCIE_INT_D,
313 MSM_PCIE_INT_PLS_PME,
314 MSM_PCIE_INT_PME_LEGACY,
315 MSM_PCIE_INT_PLS_ERR,
316 MSM_PCIE_INT_AER_LEGACY,
317 MSM_PCIE_INT_LINK_UP,
318 MSM_PCIE_INT_LINK_DOWN,
319 MSM_PCIE_INT_BRIDGE_FLUSH_N,
320 MSM_PCIE_INT_GLOBAL_INT,
321 MSM_PCIE_MAX_IRQ,
322};
323
324enum msm_pcie_irq_event {
325 MSM_PCIE_INT_EVT_LINK_DOWN = 1,
326 MSM_PCIE_INT_EVT_BME,
327 MSM_PCIE_INT_EVT_PM_TURNOFF,
328 MSM_PCIE_INT_EVT_DEBUG,
329 MSM_PCIE_INT_EVT_LTR,
330 MSM_PCIE_INT_EVT_MHI_Q6,
331 MSM_PCIE_INT_EVT_MHI_A7,
332 MSM_PCIE_INT_EVT_DSTATE_CHANGE,
333 MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
334 MSM_PCIE_INT_EVT_MMIO_WRITE,
335 MSM_PCIE_INT_EVT_CFG_WRITE,
336 MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
337 MSM_PCIE_INT_EVT_LINK_UP,
338 MSM_PCIE_INT_EVT_AER_LEGACY,
339 MSM_PCIE_INT_EVT_AER_ERR,
340 MSM_PCIE_INT_EVT_PME_LEGACY,
341 MSM_PCIE_INT_EVT_PLS_PME,
342 MSM_PCIE_INT_EVT_INTD,
343 MSM_PCIE_INT_EVT_INTC,
344 MSM_PCIE_INT_EVT_INTB,
345 MSM_PCIE_INT_EVT_INTA,
346 MSM_PCIE_INT_EVT_EDMA,
347 MSM_PCIE_INT_EVT_MSI_0,
348 MSM_PCIE_INT_EVT_MSI_1,
349 MSM_PCIE_INT_EVT_MSI_2,
350 MSM_PCIE_INT_EVT_MSI_3,
351 MSM_PCIE_INT_EVT_MSI_4,
352 MSM_PCIE_INT_EVT_MSI_5,
353 MSM_PCIE_INT_EVT_MSI_6,
354 MSM_PCIE_INT_EVT_MSI_7,
355 MSM_PCIE_INT_EVT_MAX = 30,
356};
357
/* Indices into msm_pcie_dev_t::gpio[] / msm_pcie_gpio_info[] */
enum msm_pcie_gpio {
	MSM_PCIE_GPIO_PERST,	/* PERST# (fundamental reset) to the endpoint */
	MSM_PCIE_GPIO_WAKE,	/* WAKE# input from the endpoint */
	MSM_PCIE_GPIO_EP,	/* optional endpoint control GPIO ("qcom,ep-gpio") */
	MSM_PCIE_MAX_GPIO
};
364
/* Per-RC link state, tracked in msm_pcie_dev_t::link_status */
enum msm_pcie_link_status {
	MSM_PCIE_LINK_DEINIT,	/* link has not been initialized */
	MSM_PCIE_LINK_ENABLED,	/* link is up and usable */
	MSM_PCIE_LINK_DISABLED	/* link has been brought down */
};
370
/*
 * Bit flags stored in msm_pcie_dev_t::boot_option; each set bit suppresses
 * one trigger for automatic enumeration (at probe, or on a WAKE event).
 * Presumably populated from device tree / platform data — confirm at the
 * point where boot_option is assigned.
 */
enum msm_pcie_boot_option {
	MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0),	/* skip enumeration at probe */
	MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)	/* skip enumeration on wake */
};
375
Tony Truongbad3b742017-11-22 14:40:19 -0800376enum msm_pcie_debugfs_option {
377 MSM_PCIE_OUTPUT_PCIE_INFO,
378 MSM_PCIE_DISABLE_LINK,
379 MSM_PCIE_ENABLE_LINK,
380 MSM_PCIE_DISABLE_ENABLE_LINK,
381 MSM_PCIE_DUMP_SHADOW_REGISTER,
382 MSM_PCIE_DISABLE_L0S,
383 MSM_PCIE_ENABLE_L0S,
384 MSM_PCIE_DISABLE_L1,
385 MSM_PCIE_ENABLE_L1,
386 MSM_PCIE_DISABLE_L1SS,
387 MSM_PCIE_ENABLE_L1SS,
388 MSM_PCIE_ENUMERATION,
389 MSM_PCIE_READ_PCIE_REGISTER,
390 MSM_PCIE_WRITE_PCIE_REGISTER,
391 MSM_PCIE_DUMP_PCIE_REGISTER_SPACE,
392 MSM_PCIE_ALLOCATE_DDR_MAP_LBAR,
393 MSM_PCIE_FREE_DDR_UNMAP_LBAR,
394 MSM_PCIE_OUTPUT_DDR_LBAR_ADDRESS,
395 MSM_PCIE_CONFIGURE_LOOPBACK,
396 MSM_PCIE_SETUP_LOOPBACK_IATU,
397 MSM_PCIE_READ_DDR,
398 MSM_PCIE_READ_LBAR,
399 MSM_PCIE_WRITE_DDR,
400 MSM_PCIE_WRITE_LBAR,
401 MSM_PCIE_DISABLE_AER,
402 MSM_PCIE_ENABLE_AER,
403 MSM_PCIE_GPIO_STATUS,
404 MSM_PCIE_ASSERT_PERST,
405 MSM_PCIE_DEASSERT_PERST,
406 MSM_PCIE_KEEP_RESOURCES_ON,
407 MSM_PCIE_FORCE_GEN1,
408 MSM_PCIE_MAX_DEBUGFS_OPTION
409};
410
411static const char * const
412 msm_pcie_debugfs_option_desc[MSM_PCIE_MAX_DEBUGFS_OPTION] = {
413 "OUTPUT PCIE INFO",
414 "DISABLE LINK",
415 "ENABLE LINK",
416 "DISABLE AND ENABLE LINK",
417 "DUMP PCIE SHADOW REGISTER",
418 "DISABLE L0S",
419 "ENABLE L0S",
420 "DISABLE L1",
421 "ENABLE L1",
422 "DISABLE L1SS",
423 "ENABLE L1SS",
424 "ENUMERATE",
425 "READ A PCIE REGISTER",
426 "WRITE TO PCIE REGISTER",
427 "DUMP PCIE REGISTER SPACE",
428 "ALLOCATE DDR AND MAP LBAR",
429 "FREE DDR AND UNMAP LBAR",
430 "OUTPUT DDR AND LBAR VIR ADDRESS",
431 "CONFIGURE PCIE LOOPBACK",
432 "SETUP LOOPBACK IATU",
433 "READ DDR",
434 "READ LBAR",
435 "WRITE DDR",
436 "WRITE LBAR",
437 "SET AER ENABLE FLAG",
438 "CLEAR AER ENABLE FLAG",
439 "OUTPUT PERST AND WAKE GPIO STATUS",
440 "ASSERT PERST",
441 "DE-ASSERT PERST",
442 "SET KEEP_RESOURCES_ON FLAG",
443 "FORCE GEN 1 SPEED FOR LINK TRAINING"
444};
445
/* gpio info structure */
struct msm_pcie_gpio_info_t {
	char *name;		/* DT property name for this GPIO */
	uint32_t num;		/* GPIO number resolved at probe time */
	bool out;		/* true: configure as output; false: input */
	uint32_t on;		/* value to drive when asserting the GPIO */
	uint32_t init;		/* initial output value at init */
	bool required;		/* fail probe if this GPIO is missing */
};
455
/* voltage regulator info structure */
struct msm_pcie_vreg_info_t {
	struct regulator *hdl;	/* regulator handle; NULL until acquired */
	char *name;		/* regulator supply name */
	uint32_t max_v;		/* maximum voltage (uV); 0 = don't set */
	uint32_t min_v;		/* minimum voltage (uV); 0 = don't set */
	uint32_t opt_mode;	/* load current for optimum mode (uA) */
	bool required;		/* fail probe if this supply is missing */
};
465
/* reset info structure */
struct msm_pcie_reset_info_t {
	struct reset_control *hdl;	/* reset handle; NULL until acquired */
	char *name;			/* reset line name */
	bool required;			/* fail probe if this reset is missing */
};
472
/* clock info structure */
struct msm_pcie_clk_info_t {
	struct clk *hdl;	/* clock handle; NULL until acquired */
	char *name;		/* clock name */
	u32 freq;		/* rate to set in Hz; 0 = leave rate alone */
	bool config_mem;	/* configure NORETAIN mem/periph flags
				 * (see msm_pcie_config_clock_mem())
				 */
	bool required;		/* fail probe if this clock is missing */
};
481
/* resource info structure */
struct msm_pcie_res_info_t {
	char *name;			/* platform resource name */
	struct resource *resource;	/* MMIO region from the DT node */
	void __iomem *base;		/* ioremapped base of the region */
};
488
/* irq info structure */
struct msm_pcie_irq_info_t {
	char *name;	/* interrupt name in the DT node */
	uint32_t num;	/* Linux IRQ number resolved at probe time */
};
494
/*
 * phy info structure: one step of the PHY init sequence
 * (write @val to PHY register @offset, then wait @delay)
 */
struct msm_pcie_phy_info_t {
	u32 offset;	/* register offset from the PHY base */
	u32 val;	/* value to write */
	u32 delay;	/* post-write delay; units not visible here — confirm */
};
501
/* PCIe device info structure: one enumerated device behind a root complex */
struct msm_pcie_device_info {
	u32 bdf;		/* bus/device/function, encoded via BDF_OFFSET() */
	struct pci_dev *dev;	/* associated kernel PCI device */
	short short_bdf;	/* compact BDF index (bounded by MAX_SHORT_BDF_NUM) */
	u32 sid;		/* SMMU stream ID for this device */
	int domain;		/* PCI domain (RC) number */
	void __iomem *conf_base;	/* mapped config space for this device */
	unsigned long phy_address;	/* physical address of config space */
	u32 dev_ctrlstts_offset;	/* offset of Device Control/Status register */
	struct msm_pcie_register_event *event_reg;	/* client event registration */
	bool registered;	/* true once a client has registered for events */
};
515
/*
 * msm pcie device structure: complete per-root-complex driver state.
 * One instance per RC lives in the static msm_pcie_dev[] array.
 */
struct msm_pcie_dev_t {
	/* platform and kernel PCI handles */
	struct platform_device *pdev;
	struct pci_dev *dev;

	/* power: GDSCs, regulators, GPIOs, clocks, resets */
	struct regulator *gdsc;
	struct regulator *gdsc_smmu;
	struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
	struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
	struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK];
	struct msm_pcie_clk_info_t pipeclk[MSM_PCIE_MAX_PIPE_CLK];
	struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
	struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
	struct msm_pcie_irq_info_t msi[MSM_PCIE_MAX_MSI];
	struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
	struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];

	/* ioremapped register regions (see msm_pcie_res_info[]) */
	void __iomem *parf;
	void __iomem *phy;
	void __iomem *elbi;
	void __iomem *iatu;
	void __iomem *dm_core;
	void __iomem *conf;
	void __iomem *bars;
	void __iomem *tcsr;

	/* AXI address window assigned to this RC */
	uint32_t axi_bar_start;
	uint32_t axi_bar_end;

	struct resource *dev_mem_res;
	struct resource *dev_io_res;

	uint32_t wake_n;	/* IRQ number for the WAKE# GPIO */
	uint32_t vreg_n;
	uint32_t gpio_n;
	uint32_t parf_deemph;	/* value for PCIE20_PARF_PCS_DEEMPH */
	uint32_t parf_swing;	/* value for PCIE20_PARF_PCS_SWING */

	/* config-space access gating */
	bool cfg_access;
	spinlock_t cfg_lock;
	unsigned long irqsave_flags;
	struct mutex enumerate_lock;
	struct mutex setup_lock;

	/* MSI bookkeeping */
	struct irq_domain *irq_domain;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
	uint32_t msi_gicm_addr;
	uint32_t msi_gicm_base;
	bool use_msi;

	/* link state and suspend/resume bookkeeping */
	enum msm_pcie_link_status link_status;
	bool user_suspend;
	bool disable_pc;
	struct pci_saved_state *saved_state;

	struct wakeup_source ws;
	struct msm_bus_scale_pdata *bus_scale_table;
	uint32_t bus_client;

	/* capability/feature knobs, mostly from DT and module params */
	bool l0s_supported;
	bool l1_supported;
	bool l1ss_supported;
	bool common_clk_en;
	bool clk_power_manage_en;
	bool aux_clk_sync;
	bool aer_enable;
	bool smmu_exist;
	uint32_t smmu_sid_base;
	uint32_t n_fts;
	uint32_t max_link_speed;	/* GEN1_SPEED..GEN3_SPEED */
	bool ext_ref_clk;
	uint32_t ep_latency;
	uint32_t switch_latency;
	uint32_t wr_halt_size;
	uint32_t slv_addr_space_size;
	uint32_t phy_status_offset;
	uint32_t cpl_timeout;
	uint32_t current_bdf;
	uint32_t perst_delay_us_min;
	uint32_t perst_delay_us_max;
	uint32_t tlp_rd_size;
	bool linkdown_panic;
	uint32_t boot_option;	/* enum msm_pcie_boot_option flags */

	uint32_t rc_idx;	/* index of this RC (0..MAX_RC_NUM-1) */
	uint32_t phy_ver;
	bool drv_ready;
	bool enumerated;
	struct work_struct handle_wake_work;
	struct mutex recovery_lock;
	spinlock_t wakeup_lock;
	spinlock_t irq_lock;

	/* event/error counters, exposed for debug */
	ulong linkdown_counter;
	ulong link_turned_on_counter;
	ulong link_turned_off_counter;
	ulong rc_corr_counter;
	ulong rc_non_fatal_counter;
	ulong rc_fatal_counter;
	ulong ep_corr_counter;
	ulong ep_non_fatal_counter;
	ulong ep_fatal_counter;
	bool suspending;
	ulong wake_counter;
	u32 num_active_ep;
	u32 num_ep;
	bool pending_ep_reg;

	/* PHY init sequence loaded from DT (phy_len entries) */
	u32 phy_len;
	struct msm_pcie_phy_info_t *phy_sequence;

	/* shadow copies of config space, restored after link recovery */
	u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
	u32 rc_shadow[PCIE_CONF_SPACE_DW];
	bool shadow_en;
	bool bridge_found;
	struct msm_pcie_register_event *event_reg;
	unsigned int scm_dev_id;
	bool power_on;

	/* IPC logging contexts used by the PCIE_DBG*/PCIE_ERR macros */
	void *ipc_log;
	void *ipc_log_long;
	void *ipc_log_dump;
	bool use_19p2mhz_aux_clk;
	bool use_pinctrl;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_sleep;
	struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM];
};
640
Tony Truong349ee492014-10-01 17:35:56 -0700641/* debug mask sys interface */
642static int msm_pcie_debug_mask;
643module_param_named(debug_mask, msm_pcie_debug_mask,
644 int, 0644);
645
Tony Truong7416d722017-09-12 16:45:18 -0700646/*
647 * For each bit set, invert the default capability
648 * option for the corresponding root complex
649 * and its devices.
650 */
651static int msm_pcie_invert_l0s_support;
652module_param_named(invert_l0s_support, msm_pcie_invert_l0s_support,
653 int, 0644);
654static int msm_pcie_invert_l1_support;
655module_param_named(invert_l1_support, msm_pcie_invert_l1_support,
656 int, 0644);
657static int msm_pcie_invert_l1ss_support;
658module_param_named(invert_l1ss_support, msm_pcie_invert_l1ss_support,
659 int, 0644);
660static int msm_pcie_invert_aer_support;
661module_param_named(invert_aer_support, msm_pcie_invert_aer_support,
662 int, 0644);
663
664/*
665 * For each bit set, keep the resources on when link training fails
666 * or linkdown occurs for the corresponding root complex
667 */
668static int msm_pcie_keep_resources_on;
669module_param_named(keep_resources_on, msm_pcie_keep_resources_on,
670 int, 0644);
671
Tony Truongbad3b742017-11-22 14:40:19 -0800672/*
673 * For each bit set, force the corresponding root complex
674 * to do link training at gen1 speed.
675 */
676static int msm_pcie_force_gen1;
677module_param_named(force_gen1, msm_pcie_force_gen1,
678 int, 0644);
679
680
681/*
682 * For each bit set in BIT[3:0] determines which corresponding
683 * root complex will use the value in BIT[31:4] to override the
684 * default (LINK_UP_CHECK_MAX_COUNT) max check count for link training.
685 * Each iteration is LINK_UP_TIMEOUT_US_MIN long.
686 */
687static int msm_pcie_link_check_max_count;
688module_param_named(link_check_max_count, msm_pcie_link_check_max_count,
689 int, 0644);
690
Tony Truong349ee492014-10-01 17:35:56 -0700691/* debugfs values */
Tony Truongbad3b742017-11-22 14:40:19 -0800692static u32 rc_sel = BIT(0);
Tony Truong349ee492014-10-01 17:35:56 -0700693static u32 base_sel;
694static u32 wr_offset;
695static u32 wr_mask;
696static u32 wr_value;
Tony Truongbad3b742017-11-22 14:40:19 -0800697static u32 corr_counter_limit = 5;
Tony Truong349ee492014-10-01 17:35:56 -0700698
Tony Truong349ee492014-10-01 17:35:56 -0700699/* Table to track info of PCIe devices */
700static struct msm_pcie_device_info
701 msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];
702
703/* PCIe driver state */
Stephen Boydb5b8fc32017-06-21 08:59:11 -0700704static struct pcie_drv_sta {
Tony Truong349ee492014-10-01 17:35:56 -0700705 u32 rc_num;
706 struct mutex drv_lock;
707} pcie_drv;
708
709/* msm pcie device data */
710static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
711
712/* regulators */
713static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
714 {NULL, "vreg-3.3", 0, 0, 0, false},
715 {NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
716 {NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
717 {NULL, "vreg-cx", 0, 0, 0, false}
718};
719
720/* GPIOs */
721static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
722 {"perst-gpio", 0, 1, 0, 0, 1},
723 {"wake-gpio", 0, 0, 0, 0, 0},
724 {"qcom,ep-gpio", 0, 1, 1, 0, 0}
725};
726
727/* resets */
728static struct msm_pcie_reset_info_t
729msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
730 {
Tony Truongb213ac12017-04-05 15:21:20 -0700731 {NULL, "pcie_0_core_reset", false},
Tony Truong349ee492014-10-01 17:35:56 -0700732 {NULL, "pcie_phy_reset", false},
733 {NULL, "pcie_phy_com_reset", false},
734 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
735 {NULL, "pcie_0_phy_reset", false}
736 },
737 {
Tony Truongb213ac12017-04-05 15:21:20 -0700738 {NULL, "pcie_1_core_reset", false},
Tony Truong349ee492014-10-01 17:35:56 -0700739 {NULL, "pcie_phy_reset", false},
740 {NULL, "pcie_phy_com_reset", false},
741 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
742 {NULL, "pcie_1_phy_reset", false}
743 },
744 {
Tony Truongb213ac12017-04-05 15:21:20 -0700745 {NULL, "pcie_2_core_reset", false},
Tony Truong349ee492014-10-01 17:35:56 -0700746 {NULL, "pcie_phy_reset", false},
747 {NULL, "pcie_phy_com_reset", false},
748 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
749 {NULL, "pcie_2_phy_reset", false}
750 }
751};
752
753/* pipe reset */
754static struct msm_pcie_reset_info_t
755msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
756 {
757 {NULL, "pcie_0_phy_pipe_reset", false}
758 },
759 {
760 {NULL, "pcie_1_phy_pipe_reset", false}
761 },
762 {
763 {NULL, "pcie_2_phy_pipe_reset", false}
764 }
765};
766
767/* clocks */
768static struct msm_pcie_clk_info_t
769 msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
770 {
771 {NULL, "pcie_0_ref_clk_src", 0, false, false},
772 {NULL, "pcie_0_aux_clk", 1010000, false, true},
773 {NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
774 {NULL, "pcie_0_mstr_axi_clk", 0, true, true},
775 {NULL, "pcie_0_slv_axi_clk", 0, true, true},
776 {NULL, "pcie_0_ldo", 0, false, true},
777 {NULL, "pcie_0_smmu_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700778 {NULL, "pcie_0_slv_q2a_axi_clk", 0, false, false},
Tony Truong4c1b3be2017-12-12 11:06:18 -0800779 {NULL, "pcie_0_sleep_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700780 {NULL, "pcie_phy_refgen_clk", 0, false, false},
781 {NULL, "pcie_tbu_clk", 0, false, false},
Tony Truong349ee492014-10-01 17:35:56 -0700782 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
783 {NULL, "pcie_phy_aux_clk", 0, false, false}
784 },
785 {
786 {NULL, "pcie_1_ref_clk_src", 0, false, false},
787 {NULL, "pcie_1_aux_clk", 1010000, false, true},
788 {NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
789 {NULL, "pcie_1_mstr_axi_clk", 0, true, true},
790 {NULL, "pcie_1_slv_axi_clk", 0, true, true},
791 {NULL, "pcie_1_ldo", 0, false, true},
792 {NULL, "pcie_1_smmu_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700793 {NULL, "pcie_1_slv_q2a_axi_clk", 0, false, false},
Tony Truong4c1b3be2017-12-12 11:06:18 -0800794 {NULL, "pcie_1_sleep_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700795 {NULL, "pcie_phy_refgen_clk", 0, false, false},
796 {NULL, "pcie_tbu_clk", 0, false, false},
Tony Truong349ee492014-10-01 17:35:56 -0700797 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
798 {NULL, "pcie_phy_aux_clk", 0, false, false}
799 },
800 {
801 {NULL, "pcie_2_ref_clk_src", 0, false, false},
802 {NULL, "pcie_2_aux_clk", 1010000, false, true},
803 {NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
804 {NULL, "pcie_2_mstr_axi_clk", 0, true, true},
805 {NULL, "pcie_2_slv_axi_clk", 0, true, true},
806 {NULL, "pcie_2_ldo", 0, false, true},
807 {NULL, "pcie_2_smmu_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700808 {NULL, "pcie_2_slv_q2a_axi_clk", 0, false, false},
Tony Truong4c1b3be2017-12-12 11:06:18 -0800809 {NULL, "pcie_2_sleep_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700810 {NULL, "pcie_phy_refgen_clk", 0, false, false},
811 {NULL, "pcie_tbu_clk", 0, false, false},
Tony Truong349ee492014-10-01 17:35:56 -0700812 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
813 {NULL, "pcie_phy_aux_clk", 0, false, false}
814 }
815};
816
817/* Pipe Clocks */
818static struct msm_pcie_clk_info_t
819 msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
820 {
821 {NULL, "pcie_0_pipe_clk", 125000000, true, true},
822 },
823 {
824 {NULL, "pcie_1_pipe_clk", 125000000, true, true},
825 },
826 {
827 {NULL, "pcie_2_pipe_clk", 125000000, true, true},
828 }
829};
830
831/* resources */
832static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
Stephen Boydb5b8fc32017-06-21 08:59:11 -0700833 {"parf", NULL, NULL},
834 {"phy", NULL, NULL},
835 {"dm_core", NULL, NULL},
836 {"elbi", NULL, NULL},
Tony Truongf49801f2017-10-25 11:22:35 -0700837 {"iatu", NULL, NULL},
Stephen Boydb5b8fc32017-06-21 08:59:11 -0700838 {"conf", NULL, NULL},
839 {"io", NULL, NULL},
840 {"bars", NULL, NULL},
841 {"tcsr", NULL, NULL}
Tony Truong349ee492014-10-01 17:35:56 -0700842};
843
844/* irqs */
845static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
846 {"int_msi", 0},
847 {"int_a", 0},
848 {"int_b", 0},
849 {"int_c", 0},
850 {"int_d", 0},
851 {"int_pls_pme", 0},
852 {"int_pme_legacy", 0},
853 {"int_pls_err", 0},
854 {"int_aer_legacy", 0},
855 {"int_pls_link_up", 0},
856 {"int_pls_link_down", 0},
857 {"int_bridge_flush_n", 0},
858 {"int_global_int", 0}
859};
860
861/* MSIs */
862static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
863 {"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
864 {"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
865 {"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
866 {"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
867 {"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
868 {"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
869 {"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
870 {"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
871};
872
Tony Truong7772e692017-04-13 17:03:34 -0700873static int msm_pcie_config_device(struct pci_dev *dev, void *pdev);
Tony Truongbad3b742017-11-22 14:40:19 -0800874static int msm_pcie_config_l0s_disable(struct pci_dev *dev, void *pdev);
875static int msm_pcie_config_l0s_enable(struct pci_dev *dev, void *pdev);
876static int msm_pcie_config_l1_disable(struct pci_dev *dev, void *pdev);
877static int msm_pcie_config_l1_enable(struct pci_dev *dev, void *pdev);
878static int msm_pcie_config_l1ss_disable(struct pci_dev *dev, void *pdev);
879static int msm_pcie_config_l1ss_enable(struct pci_dev *dev, void *pdev);
Tony Truongb1af8b62017-05-31 15:40:38 -0700880static void msm_pcie_config_link_pm_rc(struct msm_pcie_dev_t *dev,
881 struct pci_dev *pdev, bool enable);
Tony Truong7772e692017-04-13 17:03:34 -0700882
#ifdef CONFIG_ARM
/* Map legacy INTx lines for all enumerated devices (ARM32 only). */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
}
#else
/* No-op: pci_fixup_irqs() is not available/needed on non-ARM32 builds. */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
}
#endif
893
Stephen Boydb5b8fc32017-06-21 08:59:11 -0700894static inline void msm_pcie_write_reg(void __iomem *base, u32 offset, u32 value)
Tony Truong349ee492014-10-01 17:35:56 -0700895{
896 writel_relaxed(value, base + offset);
897 /* ensure that changes propagated to the hardware */
898 wmb();
899}
900
Stephen Boydb5b8fc32017-06-21 08:59:11 -0700901static inline void msm_pcie_write_reg_field(void __iomem *base, u32 offset,
Tony Truong349ee492014-10-01 17:35:56 -0700902 const u32 mask, u32 val)
903{
904 u32 shift = find_first_bit((void *)&mask, 32);
905 u32 tmp = readl_relaxed(base + offset);
906
907 tmp &= ~mask; /* clear written bits */
908 val = tmp | (val << shift);
909 writel_relaxed(val, base + offset);
910 /* ensure that changes propagated to the hardware */
911 wmb();
912}
913
Tony Truongb1af8b62017-05-31 15:40:38 -0700914static inline void msm_pcie_config_clear_set_dword(struct pci_dev *pdev,
915 int pos, u32 clear, u32 set)
916{
917 u32 val;
918
919 pci_read_config_dword(pdev, pos, &val);
920 val &= ~clear;
921 val |= set;
922 pci_write_config_dword(pdev, pos, val);
923}
924
Tony Truong349ee492014-10-01 17:35:56 -0700925static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
926 struct msm_pcie_clk_info_t *info)
927{
928 int ret;
929
930 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
931 if (ret)
932 PCIE_ERR(dev,
933 "PCIe: RC%d can't configure core memory for clk %s: %d.\n",
934 dev->rc_idx, info->name, ret);
935 else
936 PCIE_DBG2(dev,
937 "PCIe: RC%d configured core memory for clk %s.\n",
938 dev->rc_idx, info->name);
939
940 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
941 if (ret)
942 PCIE_ERR(dev,
943 "PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
944 dev->rc_idx, info->name, ret);
945 else
946 PCIE_DBG2(dev,
947 "PCIe: RC%d configured peripheral memory for clk %s.\n",
948 dev->rc_idx, info->name);
949}
950
Tony Truong349ee492014-10-01 17:35:56 -0700951static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
952{
953 int i, size;
Tony Truong349ee492014-10-01 17:35:56 -0700954
955 size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
956 for (i = 0; i < size; i += 32) {
957 PCIE_DUMP(dev,
958 "PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
959 dev->rc_idx, i,
960 readl_relaxed(dev->phy + i),
961 readl_relaxed(dev->phy + (i + 4)),
962 readl_relaxed(dev->phy + (i + 8)),
963 readl_relaxed(dev->phy + (i + 12)),
964 readl_relaxed(dev->phy + (i + 16)),
965 readl_relaxed(dev->phy + (i + 20)),
966 readl_relaxed(dev->phy + (i + 24)),
967 readl_relaxed(dev->phy + (i + 28)));
968 }
969}
970
Tony Truong349ee492014-10-01 17:35:56 -0700971static void pcie_phy_init(struct msm_pcie_dev_t *dev)
972{
973 int i;
974 struct msm_pcie_phy_info_t *phy_seq;
975
976 PCIE_DBG(dev,
977 "RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
978 dev->rc_idx);
979
980 if (dev->phy_sequence) {
981 i = dev->phy_len;
982 phy_seq = dev->phy_sequence;
983 while (i--) {
984 msm_pcie_write_reg(dev->phy,
985 phy_seq->offset,
986 phy_seq->val);
987 if (phy_seq->delay)
988 usleep_range(phy_seq->delay,
989 phy_seq->delay + 1);
990 phy_seq++;
991 }
Tony Truong349ee492014-10-01 17:35:56 -0700992 }
993}
994
Tony Truong349ee492014-10-01 17:35:56 -0700995static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
996{
Tony Truong2b675ba2017-12-12 14:52:00 -0800997 if (readl_relaxed(dev->phy + dev->phy_status_offset) & BIT(6))
Tony Truong349ee492014-10-01 17:35:56 -0700998 return false;
999 else
1000 return true;
1001}
Tony Truong349ee492014-10-01 17:35:56 -07001002
1003static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
1004{
1005 int ret, scm_ret;
1006
1007 if (!dev) {
1008 pr_err("PCIe: the input pcie dev is NULL.\n");
1009 return -ENODEV;
1010 }
1011
1012 ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
1013 if (ret || scm_ret) {
1014 PCIE_ERR(dev,
1015 "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
1016 dev->rc_idx, ret, scm_ret);
1017 return ret ? ret : -EINVAL;
1018 }
1019
1020 return 0;
1021}
1022
1023static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
1024 u32 offset)
1025{
1026 if (offset % 4) {
1027 PCIE_ERR(dev,
1028 "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
1029 dev->rc_idx, offset);
1030 return MSM_PCIE_ERROR;
1031 }
1032
1033 return 0;
1034}
1035
1036static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
1037 bool check_sw_stts,
1038 bool check_ep,
1039 void __iomem *ep_conf)
1040{
1041 u32 val;
1042
1043 if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
1044 PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
1045 dev->rc_idx);
1046 return false;
1047 }
1048
1049 if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
1050 PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
1051 dev->rc_idx);
1052 return false;
1053 }
1054
1055 val = readl_relaxed(dev->dm_core);
1056 PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
1057 dev->rc_idx, val);
1058 if (val == PCIE_LINK_DOWN) {
1059 PCIE_ERR(dev,
1060 "PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
1061 dev->rc_idx, dev->rc_idx, val);
1062 return false;
1063 }
1064
1065 if (check_ep) {
1066 val = readl_relaxed(ep_conf);
1067 PCIE_DBG(dev,
1068 "PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
1069 dev->rc_idx, val);
1070 if (val == PCIE_LINK_DOWN) {
1071 PCIE_ERR(dev,
1072 "PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
1073 dev->rc_idx, dev->rc_idx, val);
1074 return false;
1075 }
1076 }
1077
1078 return true;
1079}
1080
/*
 * Restore config space from the shadow copies after a link recovery.
 *
 * @rc: true  -> restore only the root complex from dev->rc_shadow
 *               (loop body runs once, then breaks);
 *      false -> restore every endpoint recorded in dev->pcidev_table
 *               from its per-device shadow, skipping devices whose
 *               link cannot be confirmed.
 *
 * Shadow slots holding PCIE_CLEAR are untouched entries and are
 * skipped. The dword walk is deliberately in reverse (high offsets
 * first) so, e.g., BAR/command bits are re-enabled last.
 */
static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
{
	int i, j;
	u32 val = 0;
	u32 *shadow;
	void __iomem *cfg = dev->conf;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		/* table is packed: first empty BDF ends the EP list */
		if (!rc && !dev->pcidev_table[i].bdf)
			break;
		if (rc) {
			cfg = dev->dm_core;
			shadow = dev->rc_shadow;
		} else {
			/* don't write config space of an EP whose link is down */
			if (!msm_pcie_confirm_linkup(dev, false, true,
				dev->pcidev_table[i].conf_base))
				continue;

			shadow = dev->ep_shadow[i];
			PCIE_DBG(dev,
				"PCIe Device: %02x:%02x.%01x\n",
				dev->pcidev_table[i].bdf >> 24,
				dev->pcidev_table[i].bdf >> 19 & 0x1f,
				dev->pcidev_table[i].bdf >> 16 & 0x07);
		}
		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
			val = shadow[j];
			/* PCIE_CLEAR marks a dword that was never shadowed */
			if (val != PCIE_CLEAR) {
				PCIE_DBG3(dev,
					"PCIe: before recovery:cfg 0x%x:0x%x\n",
					j * 4, readl_relaxed(cfg + j * 4));
				PCIE_DBG3(dev,
					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
					j, j * 4, val);
				writel_relaxed(val, cfg + j * 4);
				/* ensure changes propagated to the hardware */
				wmb();
				PCIE_DBG3(dev,
					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
					j * 4, readl_relaxed(cfg + j * 4));
			}
		}
		if (rc)
			break;

		/* refresh the PCI core's saved state to match what we wrote */
		pci_save_state(dev->pcidev_table[i].dev);
		/* each EP's config window is 4K after the previous one */
		cfg += SZ_4K;
	}
}
1130
1131static void msm_pcie_write_mask(void __iomem *addr,
1132 uint32_t clear_mask, uint32_t set_mask)
1133{
1134 uint32_t val;
1135
1136 val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
1137 writel_relaxed(val, addr);
1138 wmb(); /* ensure data is written to hardware register */
1139}
1140
1141static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
1142{
1143 int i, size;
1144 u32 original;
1145
1146 PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);
1147
1148 original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
1149 for (i = 1; i <= 0x1A; i++) {
1150 msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
1151 0xFF0000, i << 16);
1152 PCIE_DUMP(dev,
1153 "RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
1154 dev->rc_idx,
1155 readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
1156 readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
1157 }
1158 writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);
1159
1160 PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);
1161
1162 size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
1163 for (i = 0; i < size; i += 32) {
1164 PCIE_DUMP(dev,
1165 "RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1166 dev->rc_idx, i,
1167 readl_relaxed(dev->parf + i),
1168 readl_relaxed(dev->parf + (i + 4)),
1169 readl_relaxed(dev->parf + (i + 8)),
1170 readl_relaxed(dev->parf + (i + 12)),
1171 readl_relaxed(dev->parf + (i + 16)),
1172 readl_relaxed(dev->parf + (i + 20)),
1173 readl_relaxed(dev->parf + (i + 24)),
1174 readl_relaxed(dev->parf + (i + 28)));
1175 }
1176}
1177
1178static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
1179{
1180 PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
1181 dev->rc_idx, dev->enumerated ? "" : "not");
1182 PCIE_DBG_FS(dev, "PCIe: link is %s\n",
1183 (dev->link_status == MSM_PCIE_LINK_ENABLED)
1184 ? "enabled" : "disabled");
1185 PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
1186 dev->cfg_access ? "" : "not");
1187 PCIE_DBG_FS(dev, "use_msi is %d\n",
1188 dev->use_msi);
1189 PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
1190 dev->use_pinctrl);
1191 PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
1192 dev->use_19p2mhz_aux_clk);
1193 PCIE_DBG_FS(dev, "user_suspend is %d\n",
1194 dev->user_suspend);
1195 PCIE_DBG_FS(dev, "num_ep: %d\n",
1196 dev->num_ep);
1197 PCIE_DBG_FS(dev, "num_active_ep: %d\n",
1198 dev->num_active_ep);
1199 PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
1200 dev->pending_ep_reg ? "true" : "false");
1201 PCIE_DBG_FS(dev, "phy_len is %d",
1202 dev->phy_len);
Tony Truong349ee492014-10-01 17:35:56 -07001203 PCIE_DBG_FS(dev, "disable_pc is %d",
1204 dev->disable_pc);
1205 PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
1206 dev->l0s_supported ? "" : "not");
1207 PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
1208 dev->l1_supported ? "" : "not");
1209 PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
1210 dev->l1ss_supported ? "" : "not");
1211 PCIE_DBG_FS(dev, "common_clk_en is %d\n",
1212 dev->common_clk_en);
1213 PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
1214 dev->clk_power_manage_en);
1215 PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
1216 dev->aux_clk_sync);
1217 PCIE_DBG_FS(dev, "AER is %s enable\n",
1218 dev->aer_enable ? "" : "not");
1219 PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
1220 dev->ext_ref_clk);
Tony Truong9f2c7722017-02-28 15:02:27 -08001221 PCIE_DBG_FS(dev, "boot_option is 0x%x\n",
1222 dev->boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07001223 PCIE_DBG_FS(dev, "phy_ver is %d\n",
1224 dev->phy_ver);
1225 PCIE_DBG_FS(dev, "drv_ready is %d\n",
1226 dev->drv_ready);
1227 PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
1228 dev->linkdown_panic);
1229 PCIE_DBG_FS(dev, "the link is %s suspending\n",
1230 dev->suspending ? "" : "not");
1231 PCIE_DBG_FS(dev, "shadow is %s enabled\n",
1232 dev->shadow_en ? "" : "not");
1233 PCIE_DBG_FS(dev, "the power of RC is %s on\n",
1234 dev->power_on ? "" : "not");
1235 PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
1236 dev->msi_gicm_addr);
1237 PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
1238 dev->msi_gicm_base);
1239 PCIE_DBG_FS(dev, "bus_client: %d\n",
1240 dev->bus_client);
Tony Truong349ee492014-10-01 17:35:56 -07001241 PCIE_DBG_FS(dev, "smmu does %s exist\n",
1242 dev->smmu_exist ? "" : "not");
1243 PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
1244 dev->smmu_sid_base);
1245 PCIE_DBG_FS(dev, "n_fts: %d\n",
1246 dev->n_fts);
Tony Truong349ee492014-10-01 17:35:56 -07001247 PCIE_DBG_FS(dev, "ep_latency: %dms\n",
1248 dev->ep_latency);
Rama Krishna Phani A72fcfb22017-06-30 15:45:06 +05301249 PCIE_DBG_FS(dev, "switch_latency: %dms\n",
1250 dev->switch_latency);
Tony Truong349ee492014-10-01 17:35:56 -07001251 PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
1252 dev->wr_halt_size);
Tony Truong41e63ec2017-08-30 12:08:12 -07001253 PCIE_DBG_FS(dev, "slv_addr_space_size: 0x%x\n",
1254 dev->slv_addr_space_size);
Tony Truong2b675ba2017-12-12 14:52:00 -08001255 PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n",
1256 dev->phy_status_offset);
Tony Truong349ee492014-10-01 17:35:56 -07001257 PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
1258 dev->cpl_timeout);
1259 PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
1260 dev->current_bdf);
1261 PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
1262 dev->perst_delay_us_min);
1263 PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
1264 dev->perst_delay_us_max);
1265 PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
1266 dev->tlp_rd_size);
1267 PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
1268 dev->rc_corr_counter);
1269 PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
1270 dev->rc_non_fatal_counter);
1271 PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
1272 dev->rc_fatal_counter);
1273 PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
1274 dev->ep_corr_counter);
1275 PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
1276 dev->ep_non_fatal_counter);
1277 PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
1278 dev->ep_fatal_counter);
1279 PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
1280 dev->linkdown_counter);
1281 PCIE_DBG_FS(dev, "wake_counter: %lu\n",
1282 dev->wake_counter);
Tony Truong24e02ba2017-08-30 14:53:14 -07001283 PCIE_DBG_FS(dev, "max_link_speed: 0x%x\n",
1284 dev->max_link_speed);
Tony Truong349ee492014-10-01 17:35:56 -07001285 PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
1286 dev->link_turned_on_counter);
1287 PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
1288 dev->link_turned_off_counter);
1289}
1290
1291static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
1292{
1293 int i, j;
1294 u32 val = 0;
1295 u32 *shadow;
1296
1297 for (i = 0; i < MAX_DEVICE_NUM; i++) {
1298 if (!rc && !dev->pcidev_table[i].bdf)
1299 break;
1300 if (rc) {
1301 shadow = dev->rc_shadow;
1302 } else {
1303 shadow = dev->ep_shadow[i];
1304 PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
1305 dev->pcidev_table[i].bdf >> 24,
1306 dev->pcidev_table[i].bdf >> 19 & 0x1f,
1307 dev->pcidev_table[i].bdf >> 16 & 0x07);
1308 }
1309 for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
1310 val = shadow[j];
1311 if (val != PCIE_CLEAR) {
1312 PCIE_DBG_FS(dev,
1313 "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
1314 j, j * 4, val);
1315 }
1316 }
1317 if (rc)
1318 break;
1319 }
1320}
1321
1322static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
1323 u32 testcase)
1324{
Tony Truong09223e42017-11-08 16:50:20 -08001325 u32 dbi_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
Tony Truong06ff2ed2017-01-15 19:28:13 -08001326 phys_addr_t loopback_lbar_phy =
Tony Truong09223e42017-11-08 16:50:20 -08001327 dev->res[MSM_PCIE_RES_DM_CORE].resource->start +
1328 LOOPBACK_BASE_ADDR_OFFSET;
Tony Truong06ff2ed2017-01-15 19:28:13 -08001329 static uint32_t loopback_val = 0x1;
Tony Truong09223e42017-11-08 16:50:20 -08001330 static dma_addr_t loopback_ddr_phy;
Tony Truong06ff2ed2017-01-15 19:28:13 -08001331 static uint32_t *loopback_ddr_vir;
1332 static void __iomem *loopback_lbar_vir;
Tony Truong349ee492014-10-01 17:35:56 -07001333 int ret, i;
1334 u32 base_sel_size = 0;
Tony Truong349ee492014-10-01 17:35:56 -07001335
1336 switch (testcase) {
Tony Truongbad3b742017-11-22 14:40:19 -08001337 case MSM_PCIE_OUTPUT_PCIE_INFO:
Tony Truong349ee492014-10-01 17:35:56 -07001338 PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
1339 dev->rc_idx);
1340 msm_pcie_show_status(dev);
1341 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001342 case MSM_PCIE_DISABLE_LINK:
Tony Truong349ee492014-10-01 17:35:56 -07001343 PCIE_DBG_FS(dev,
1344 "\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
1345 ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
1346 dev->dev, NULL,
1347 MSM_PCIE_CONFIG_NO_CFG_RESTORE);
1348 if (ret)
1349 PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
1350 __func__);
1351 else
1352 PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
1353 __func__);
1354 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001355 case MSM_PCIE_ENABLE_LINK:
Tony Truong349ee492014-10-01 17:35:56 -07001356 PCIE_DBG_FS(dev,
1357 "\n\nPCIe: RC%d: enable link and recover config space\n\n",
1358 dev->rc_idx);
1359 ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
1360 dev->dev, NULL,
1361 MSM_PCIE_CONFIG_NO_CFG_RESTORE);
1362 if (ret)
1363 PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
1364 __func__);
1365 else {
1366 PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
1367 msm_pcie_recover_config(dev->dev);
1368 }
1369 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001370 case MSM_PCIE_DISABLE_ENABLE_LINK:
Tony Truong349ee492014-10-01 17:35:56 -07001371 PCIE_DBG_FS(dev,
1372 "\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
1373 dev->rc_idx);
1374 ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
1375 dev->dev, NULL,
1376 MSM_PCIE_CONFIG_NO_CFG_RESTORE);
1377 if (ret)
1378 PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
1379 __func__);
1380 else
1381 PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
1382 ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
1383 dev->dev, NULL,
1384 MSM_PCIE_CONFIG_NO_CFG_RESTORE);
1385 if (ret)
1386 PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
1387 __func__);
1388 else {
1389 PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
1390 msm_pcie_recover_config(dev->dev);
1391 }
1392 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001393 case MSM_PCIE_DUMP_SHADOW_REGISTER:
Tony Truong349ee492014-10-01 17:35:56 -07001394 PCIE_DBG_FS(dev,
1395 "\n\nPCIe: RC%d: dumping RC shadow registers\n",
1396 dev->rc_idx);
1397 msm_pcie_shadow_dump(dev, true);
1398
1399 PCIE_DBG_FS(dev,
1400 "\n\nPCIe: RC%d: dumping EP shadow registers\n",
1401 dev->rc_idx);
1402 msm_pcie_shadow_dump(dev, false);
1403 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001404 case MSM_PCIE_DISABLE_L0S:
Tony Truong349ee492014-10-01 17:35:56 -07001405 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
1406 dev->rc_idx);
Tony Truongbad3b742017-11-22 14:40:19 -08001407 if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
1408 struct pci_bus *bus, *c_bus;
1409 struct list_head *children = &dev->dev->bus->children;
1410
1411 msm_pcie_config_l0s_disable(dev->dev, dev);
1412
1413 list_for_each_entry_safe(bus, c_bus, children, node)
1414 pci_walk_bus(bus,
1415 &msm_pcie_config_l0s_disable, dev);
Tony Truong349ee492014-10-01 17:35:56 -07001416 }
Tony Truongbad3b742017-11-22 14:40:19 -08001417 dev->l0s_supported = false;
Tony Truong349ee492014-10-01 17:35:56 -07001418 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001419 case MSM_PCIE_ENABLE_L0S:
Tony Truong349ee492014-10-01 17:35:56 -07001420 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
1421 dev->rc_idx);
Tony Truongbad3b742017-11-22 14:40:19 -08001422 dev->l0s_supported = true;
1423 if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
1424 struct pci_bus *bus, *c_bus;
1425 struct list_head *children = &dev->dev->bus->children;
1426
1427 list_for_each_entry_safe(bus, c_bus, children, node)
1428 pci_walk_bus(bus,
1429 &msm_pcie_config_l0s_enable, dev);
1430
1431 msm_pcie_config_l0s_enable(dev->dev, dev);
Tony Truong349ee492014-10-01 17:35:56 -07001432 }
Tony Truong349ee492014-10-01 17:35:56 -07001433 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001434 case MSM_PCIE_DISABLE_L1:
Tony Truong349ee492014-10-01 17:35:56 -07001435 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
1436 dev->rc_idx);
Tony Truongbad3b742017-11-22 14:40:19 -08001437 if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
1438 struct pci_bus *bus, *c_bus;
1439 struct list_head *children = &dev->dev->bus->children;
1440
1441 msm_pcie_config_l1_disable(dev->dev, dev);
1442
1443 list_for_each_entry_safe(bus, c_bus, children, node)
1444 pci_walk_bus(bus,
1445 &msm_pcie_config_l1_disable, dev);
Tony Truong349ee492014-10-01 17:35:56 -07001446 }
Tony Truongbad3b742017-11-22 14:40:19 -08001447 dev->l1_supported = false;
Tony Truong349ee492014-10-01 17:35:56 -07001448 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001449 case MSM_PCIE_ENABLE_L1:
Tony Truong349ee492014-10-01 17:35:56 -07001450 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
1451 dev->rc_idx);
Tony Truongbad3b742017-11-22 14:40:19 -08001452 dev->l1_supported = true;
1453 if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
1454 struct pci_bus *bus, *c_bus;
1455 struct list_head *children = &dev->dev->bus->children;
1456
1457 list_for_each_entry_safe(bus, c_bus, children, node)
1458 pci_walk_bus(bus,
1459 &msm_pcie_config_l1_enable, dev);
1460
1461 msm_pcie_config_l1_enable(dev->dev, dev);
Tony Truong349ee492014-10-01 17:35:56 -07001462 }
Tony Truong349ee492014-10-01 17:35:56 -07001463 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001464 case MSM_PCIE_DISABLE_L1SS:
Tony Truong349ee492014-10-01 17:35:56 -07001465 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
1466 dev->rc_idx);
Tony Truongbad3b742017-11-22 14:40:19 -08001467 if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
1468 struct pci_bus *bus, *c_bus;
1469 struct list_head *children = &dev->dev->bus->children;
Tony Truong349ee492014-10-01 17:35:56 -07001470
Tony Truongbad3b742017-11-22 14:40:19 -08001471 msm_pcie_config_l1ss_disable(dev->dev, dev);
Tony Truong349ee492014-10-01 17:35:56 -07001472
Tony Truongbad3b742017-11-22 14:40:19 -08001473 list_for_each_entry_safe(bus, c_bus, children, node)
1474 pci_walk_bus(bus,
1475 &msm_pcie_config_l1ss_disable, dev);
Tony Truong349ee492014-10-01 17:35:56 -07001476 }
Tony Truongbad3b742017-11-22 14:40:19 -08001477 dev->l1ss_supported = false;
Tony Truong349ee492014-10-01 17:35:56 -07001478 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001479 case MSM_PCIE_ENABLE_L1SS:
Tony Truong349ee492014-10-01 17:35:56 -07001480 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
1481 dev->rc_idx);
Tony Truongbad3b742017-11-22 14:40:19 -08001482 dev->l1ss_supported = true;
1483 if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
1484 struct pci_bus *bus, *c_bus;
1485 struct list_head *children = &dev->dev->bus->children;
1486
1487 list_for_each_entry_safe(bus, c_bus, children, node)
1488 pci_walk_bus(bus,
1489 &msm_pcie_config_l1ss_enable, dev);
1490
1491 msm_pcie_config_l1ss_enable(dev->dev, dev);
Tony Truong349ee492014-10-01 17:35:56 -07001492 }
Tony Truong349ee492014-10-01 17:35:56 -07001493 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001494 case MSM_PCIE_ENUMERATION:
Tony Truong349ee492014-10-01 17:35:56 -07001495 PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
1496 dev->rc_idx);
1497 if (dev->enumerated)
1498 PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
1499 dev->rc_idx);
1500 else {
1501 if (!msm_pcie_enumerate(dev->rc_idx))
1502 PCIE_DBG_FS(dev,
1503 "PCIe: RC%d is successfully enumerated\n",
1504 dev->rc_idx);
1505 else
1506 PCIE_DBG_FS(dev,
1507 "PCIe: RC%d enumeration failed\n",
1508 dev->rc_idx);
1509 }
1510 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001511 case MSM_PCIE_READ_PCIE_REGISTER:
1512 PCIE_DBG_FS(dev,
1513 "\n\nPCIe: RC%d: read a PCIe register\n\n",
1514 dev->rc_idx);
1515 if (!base_sel) {
1516 PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
1517 break;
1518 }
1519
1520 PCIE_DBG_FS(dev, "base: %s: 0x%pK\nwr_offset: 0x%x\n",
1521 dev->res[base_sel - 1].name,
1522 dev->res[base_sel - 1].base,
1523 wr_offset);
1524
1525 base_sel_size = resource_size(dev->res[base_sel - 1].resource);
1526
1527 if (wr_offset > base_sel_size - 4 ||
1528 msm_pcie_check_align(dev, wr_offset)) {
1529 PCIE_DBG_FS(dev,
1530 "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
1531 dev->rc_idx, wr_offset, base_sel_size - 4);
1532 } else {
1533 phys_addr_t wr_register =
1534 dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
1535
1536 wr_register += wr_offset;
1537 PCIE_DBG_FS(dev,
1538 "PCIe: RC%d: register: 0x%pa value: 0x%x\n",
1539 dev->rc_idx, &wr_register,
1540 readl_relaxed(dev->res[base_sel - 1].base +
1541 wr_offset));
1542 }
1543
1544 break;
1545 case MSM_PCIE_WRITE_PCIE_REGISTER:
Tony Truong349ee492014-10-01 17:35:56 -07001546 PCIE_DBG_FS(dev,
1547 "\n\nPCIe: RC%d: writing a value to a register\n\n",
1548 dev->rc_idx);
1549
1550 if (!base_sel) {
1551 PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
1552 break;
1553 }
1554
1555 PCIE_DBG_FS(dev,
Tony Truongbad3b742017-11-22 14:40:19 -08001556 "base: %s: 0x%pK\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
Tony Truong349ee492014-10-01 17:35:56 -07001557 dev->res[base_sel - 1].name,
1558 dev->res[base_sel - 1].base,
1559 wr_offset, wr_mask, wr_value);
1560
Tony Truong95747382017-01-06 14:03:03 -08001561 base_sel_size = resource_size(dev->res[base_sel - 1].resource);
1562
1563 if (wr_offset > base_sel_size - 4 ||
1564 msm_pcie_check_align(dev, wr_offset))
1565 PCIE_DBG_FS(dev,
1566 "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
1567 dev->rc_idx, wr_offset, base_sel_size - 4);
1568 else
1569 msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
1570 wr_offset, wr_mask, wr_value);
Tony Truong349ee492014-10-01 17:35:56 -07001571
1572 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001573 case MSM_PCIE_DUMP_PCIE_REGISTER_SPACE:
Tony Truong349ee492014-10-01 17:35:56 -07001574 if (!base_sel) {
1575 PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
1576 break;
1577 } else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
1578 pcie_parf_dump(dev);
1579 break;
1580 } else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
1581 pcie_phy_dump(dev);
1582 break;
1583 } else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
1584 base_sel_size = 0x1000;
1585 } else {
1586 base_sel_size = resource_size(
1587 dev->res[base_sel - 1].resource);
1588 }
1589
1590 PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
1591 dev->res[base_sel - 1].name, dev->rc_idx);
1592
1593 for (i = 0; i < base_sel_size; i += 32) {
1594 PCIE_DBG_FS(dev,
1595 "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1596 i, readl_relaxed(dev->res[base_sel - 1].base + i),
1597 readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
1598 readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
1599 readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
1600 readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
1601 readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
1602 readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
1603 readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
1604 }
1605 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001606 case MSM_PCIE_ALLOCATE_DDR_MAP_LBAR:
Tony Truong06ff2ed2017-01-15 19:28:13 -08001607 PCIE_DBG_FS(dev,
1608 "PCIe: RC%d: Allocate 4K DDR memory and map LBAR.\n",
1609 dev->rc_idx);
1610 loopback_ddr_vir = dma_alloc_coherent(&dev->pdev->dev,
1611 (SZ_1K * sizeof(*loopback_ddr_vir)),
1612 &loopback_ddr_phy, GFP_KERNEL);
1613 if (!loopback_ddr_vir) {
1614 PCIE_DBG_FS(dev,
1615 "PCIe: RC%d: failed to dma_alloc_coherent.\n",
1616 dev->rc_idx);
1617 } else {
1618 PCIE_DBG_FS(dev,
1619 "PCIe: RC%d: VIR DDR memory address: 0x%pK\n",
1620 dev->rc_idx, loopback_ddr_vir);
1621 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001622 "PCIe: RC%d: PHY DDR memory address: %pad\n",
1623 dev->rc_idx, &loopback_ddr_phy);
Tony Truong06ff2ed2017-01-15 19:28:13 -08001624 }
1625
Tony Truong09223e42017-11-08 16:50:20 -08001626 PCIE_DBG_FS(dev, "PCIe: RC%d: map LBAR: %pa\n",
1627 dev->rc_idx, &loopback_lbar_phy);
Tony Truong06ff2ed2017-01-15 19:28:13 -08001628 loopback_lbar_vir = devm_ioremap(&dev->pdev->dev,
1629 loopback_lbar_phy, SZ_4K);
1630 if (!loopback_lbar_vir) {
Tony Truong09223e42017-11-08 16:50:20 -08001631 PCIE_DBG_FS(dev, "PCIe: RC%d: failed to map %pa\n",
1632 dev->rc_idx, &loopback_lbar_phy);
Tony Truong06ff2ed2017-01-15 19:28:13 -08001633 } else {
1634 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001635 "PCIe: RC%d: successfully mapped %pa to 0x%pK\n",
1636 dev->rc_idx, &loopback_lbar_phy,
Tony Truong06ff2ed2017-01-15 19:28:13 -08001637 loopback_lbar_vir);
1638 }
1639 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001640 case MSM_PCIE_FREE_DDR_UNMAP_LBAR:
Tony Truong06ff2ed2017-01-15 19:28:13 -08001641 PCIE_DBG_FS(dev,
1642 "PCIe: RC%d: Release 4K DDR memory and unmap LBAR.\n",
1643 dev->rc_idx);
1644
1645 if (loopback_ddr_vir) {
1646 dma_free_coherent(&dev->pdev->dev, SZ_4K,
1647 loopback_ddr_vir, loopback_ddr_phy);
1648 loopback_ddr_vir = NULL;
1649 }
1650
1651 if (loopback_lbar_vir) {
1652 devm_iounmap(&dev->pdev->dev,
1653 loopback_lbar_vir);
1654 loopback_lbar_vir = NULL;
1655 }
1656 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001657 case MSM_PCIE_OUTPUT_DDR_LBAR_ADDRESS:
Tony Truong06ff2ed2017-01-15 19:28:13 -08001658 PCIE_DBG_FS(dev,
1659 "PCIe: RC%d: Print DDR and LBAR addresses.\n",
1660 dev->rc_idx);
1661
1662 if (!loopback_ddr_vir || !loopback_lbar_vir) {
1663 PCIE_DBG_FS(dev,
1664 "PCIe: RC%d: DDR or LBAR address is not mapped\n",
1665 dev->rc_idx);
1666 break;
1667 }
1668
1669 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001670 "PCIe: RC%d: PHY DDR address: %pad\n",
1671 dev->rc_idx, &loopback_ddr_phy);
Tony Truong06ff2ed2017-01-15 19:28:13 -08001672 PCIE_DBG_FS(dev,
1673 "PCIe: RC%d: VIR DDR address: 0x%pK\n",
1674 dev->rc_idx, loopback_ddr_vir);
1675 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001676 "PCIe: RC%d: PHY LBAR address: %pa\n",
1677 dev->rc_idx, &loopback_lbar_phy);
Tony Truong06ff2ed2017-01-15 19:28:13 -08001678 PCIE_DBG_FS(dev,
1679 "PCIe: RC%d: VIR LBAR address: 0x%pK\n",
1680 dev->rc_idx, loopback_lbar_vir);
1681 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001682 case MSM_PCIE_CONFIGURE_LOOPBACK:
Tony Truong06ff2ed2017-01-15 19:28:13 -08001683 PCIE_DBG_FS(dev,
1684 "PCIe: RC%d: Configure Loopback.\n",
1685 dev->rc_idx);
1686
1687 writel_relaxed(0x10000,
1688 dev->dm_core + PCIE20_GEN3_RELATED_REG);
1689 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001690 "PCIe: RC%d: 0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001691 dev->rc_idx,
1692 dbi_base_addr + PCIE20_GEN3_RELATED_REG,
1693 readl_relaxed(dev->dm_core +
1694 PCIE20_GEN3_RELATED_REG));
1695
1696 writel_relaxed(0x80000001,
1697 dev->dm_core + PCIE20_PIPE_LOOPBACK_CONTROL);
1698 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001699 "PCIe: RC%d: 0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001700 dev->rc_idx,
1701 dbi_base_addr + PCIE20_PIPE_LOOPBACK_CONTROL,
1702 readl_relaxed(dev->dm_core +
1703 PCIE20_PIPE_LOOPBACK_CONTROL));
1704
1705 writel_relaxed(0x00010124,
1706 dev->dm_core + PCIE20_PORT_LINK_CTRL_REG);
1707 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001708 "PCIe: RC%d: 0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001709 dev->rc_idx,
1710 dbi_base_addr + PCIE20_PORT_LINK_CTRL_REG,
1711 readl_relaxed(dev->dm_core +
1712 PCIE20_PORT_LINK_CTRL_REG));
1713 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001714 case MSM_PCIE_SETUP_LOOPBACK_IATU:
Tony Truong06ff2ed2017-01-15 19:28:13 -08001715 PCIE_DBG_FS(dev, "PCIe: RC%d: Setup iATU.\n", dev->rc_idx);
1716
1717 if (!loopback_ddr_vir) {
1718 PCIE_DBG_FS(dev,
1719 "PCIe: RC%d: DDR address is not mapped.\n",
1720 dev->rc_idx);
1721 break;
1722 }
1723
1724 writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_VIEWPORT);
1725 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001726 "PCIe: RC%d: PCIE20_PLR_IATU_VIEWPORT:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001727 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_VIEWPORT,
1728 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
1729
1730 writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_CTRL1);
1731 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001732 "PCIe: RC%d: PCIE20_PLR_IATU_CTRL1:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001733 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_CTRL1,
1734 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
1735
1736 writel_relaxed(loopback_lbar_phy,
1737 dev->dm_core + PCIE20_PLR_IATU_LBAR);
1738 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001739 "PCIe: RC%d: PCIE20_PLR_IATU_LBAR:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001740 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LBAR,
1741 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
1742
1743 writel_relaxed(0x0, dev->dm_core + PCIE20_PLR_IATU_UBAR);
1744 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001745 "PCIe: RC%d: PCIE20_PLR_IATU_UBAR:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001746 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_UBAR,
1747 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
1748
1749 writel_relaxed(loopback_lbar_phy + 0xfff,
1750 dev->dm_core + PCIE20_PLR_IATU_LAR);
1751 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001752 "PCIe: RC%d: PCIE20_PLR_IATU_LAR:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001753 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LAR,
1754 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
1755
1756 writel_relaxed(loopback_ddr_phy,
1757 dev->dm_core + PCIE20_PLR_IATU_LTAR);
1758 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001759 "PCIe: RC%d: PCIE20_PLR_IATU_LTAR:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001760 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_LTAR,
1761 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
1762
1763 writel_relaxed(0, dev->dm_core + PCIE20_PLR_IATU_UTAR);
1764 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001765 "PCIe: RC%d: PCIE20_PLR_IATU_UTAR:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001766 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_UTAR,
1767 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
1768
1769 writel_relaxed(0x80000000,
1770 dev->dm_core + PCIE20_PLR_IATU_CTRL2);
1771 PCIE_DBG_FS(dev,
Tony Truong09223e42017-11-08 16:50:20 -08001772 "PCIe: RC%d: PCIE20_PLR_IATU_CTRL2:\t0x%x: 0x%x\n",
Tony Truong06ff2ed2017-01-15 19:28:13 -08001773 dev->rc_idx, dbi_base_addr + PCIE20_PLR_IATU_CTRL2,
1774 readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
1775 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001776 case MSM_PCIE_READ_DDR:
Tony Truong06ff2ed2017-01-15 19:28:13 -08001777 PCIE_DBG_FS(dev,
1778 "PCIe: RC%d: Read DDR values.\n",
1779 dev->rc_idx);
1780
1781 if (!loopback_ddr_vir) {
1782 PCIE_DBG_FS(dev,
1783 "PCIe: RC%d: DDR is not mapped\n",
1784 dev->rc_idx);
1785 break;
1786 }
1787
1788 for (i = 0; i < SZ_1K; i += 8) {
1789 PCIE_DBG_FS(dev,
1790 "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1791 i,
1792 loopback_ddr_vir[i],
1793 loopback_ddr_vir[i + 1],
1794 loopback_ddr_vir[i + 2],
1795 loopback_ddr_vir[i + 3],
1796 loopback_ddr_vir[i + 4],
1797 loopback_ddr_vir[i + 5],
1798 loopback_ddr_vir[i + 6],
1799 loopback_ddr_vir[i + 7]);
1800 }
1801 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001802 case MSM_PCIE_READ_LBAR:
Tony Truong06ff2ed2017-01-15 19:28:13 -08001803 PCIE_DBG_FS(dev,
1804 "PCIe: RC%d: Read LBAR values.\n",
1805 dev->rc_idx);
1806
1807 if (!loopback_lbar_vir) {
1808 PCIE_DBG_FS(dev,
1809 "PCIe: RC%d: LBAR address is not mapped\n",
1810 dev->rc_idx);
1811 break;
1812 }
1813
1814 for (i = 0; i < SZ_4K; i += 32) {
1815 PCIE_DBG_FS(dev,
1816 "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1817 i,
1818 readl_relaxed(loopback_lbar_vir + i),
1819 readl_relaxed(loopback_lbar_vir + (i + 4)),
1820 readl_relaxed(loopback_lbar_vir + (i + 8)),
1821 readl_relaxed(loopback_lbar_vir + (i + 12)),
1822 readl_relaxed(loopback_lbar_vir + (i + 16)),
1823 readl_relaxed(loopback_lbar_vir + (i + 20)),
1824 readl_relaxed(loopback_lbar_vir + (i + 24)),
1825 readl_relaxed(loopback_lbar_vir + (i + 28)));
1826 }
1827 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001828 case MSM_PCIE_WRITE_DDR:
Tony Truong06ff2ed2017-01-15 19:28:13 -08001829 PCIE_DBG_FS(dev, "PCIe: RC%d: Write 0x%x to DDR.\n",
1830 dev->rc_idx, loopback_val);
1831
1832 if (!loopback_ddr_vir) {
1833 PCIE_DBG_FS(dev,
1834 "PCIe: RC%d: DDR address is not mapped\n",
1835 dev->rc_idx);
1836 break;
1837 }
1838
1839 memset(loopback_ddr_vir, loopback_val,
1840 (SZ_1K * sizeof(*loopback_ddr_vir)));
1841
1842 if (unlikely(loopback_val == UINT_MAX))
1843 loopback_val = 1;
1844 else
1845 loopback_val++;
1846 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001847 case MSM_PCIE_WRITE_LBAR:
Tony Truong06ff2ed2017-01-15 19:28:13 -08001848 PCIE_DBG_FS(dev, "PCIe: RC%d: Write 0x%x to LBAR.\n",
1849 dev->rc_idx, loopback_val);
1850
1851 if (!loopback_lbar_vir) {
1852 PCIE_DBG_FS(dev,
1853 "PCIe: RC%d: LBAR address is not mapped\n",
1854 dev->rc_idx);
1855 break;
1856 }
1857
1858 for (i = 0; i < SZ_4K; i += 32) {
1859 writel_relaxed(loopback_val,
1860 loopback_lbar_vir + i),
1861 writel_relaxed(loopback_val,
1862 loopback_lbar_vir + (i + 4)),
1863 writel_relaxed(loopback_val,
1864 loopback_lbar_vir + (i + 8)),
1865 writel_relaxed(loopback_val,
1866 loopback_lbar_vir + (i + 12)),
1867 writel_relaxed(loopback_val,
1868 loopback_lbar_vir + (i + 16)),
1869 writel_relaxed(loopback_val,
1870 loopback_lbar_vir + (i + 20)),
1871 writel_relaxed(loopback_val,
1872 loopback_lbar_vir + (i + 24)),
1873 writel_relaxed(loopback_val,
1874 loopback_lbar_vir + (i + 28));
1875 }
1876
1877 if (unlikely(loopback_val == UINT_MAX))
1878 loopback_val = 1;
1879 else
1880 loopback_val++;
1881 break;
Tony Truongbad3b742017-11-22 14:40:19 -08001882 case MSM_PCIE_DISABLE_AER:
1883 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: clear AER enable flag\n\n",
1884 dev->rc_idx);
1885 dev->aer_enable = false;
1886 break;
1887 case MSM_PCIE_ENABLE_AER:
1888 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: set AER enable flag\n\n",
1889 dev->rc_idx);
1890 dev->aer_enable = true;
1891 break;
1892 case MSM_PCIE_GPIO_STATUS:
1893 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: PERST and WAKE status\n\n",
1894 dev->rc_idx);
1895 PCIE_DBG_FS(dev,
1896 "PCIe: RC%d: PERST: gpio%u value: %d\n",
1897 dev->rc_idx, dev->gpio[MSM_PCIE_GPIO_PERST].num,
1898 gpio_get_value(dev->gpio[MSM_PCIE_GPIO_PERST].num));
1899 PCIE_DBG_FS(dev,
1900 "PCIe: RC%d: WAKE: gpio%u value: %d\n",
1901 dev->rc_idx, dev->gpio[MSM_PCIE_GPIO_WAKE].num,
1902 gpio_get_value(dev->gpio[MSM_PCIE_GPIO_WAKE].num));
1903 break;
1904 case MSM_PCIE_ASSERT_PERST:
1905 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: assert PERST\n\n",
1906 dev->rc_idx);
1907 gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
1908 dev->gpio[MSM_PCIE_GPIO_PERST].on);
1909 usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
1910 break;
1911 case MSM_PCIE_DEASSERT_PERST:
1912 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: de-assert PERST\n\n",
1913 dev->rc_idx);
1914 gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
1915 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
1916 usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
1917 break;
1918 case MSM_PCIE_KEEP_RESOURCES_ON:
1919 PCIE_DBG_FS(dev,
1920 "\n\nPCIe: RC%d: set keep resources on flag\n\n",
1921 dev->rc_idx);
1922 msm_pcie_keep_resources_on |= BIT(dev->rc_idx);
1923 break;
1924 case MSM_PCIE_FORCE_GEN1:
1925 PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: set force gen1 flag\n\n",
1926 dev->rc_idx);
1927 msm_pcie_force_gen1 |= BIT(dev->rc_idx);
1928 break;
Tony Truong349ee492014-10-01 17:35:56 -07001929 default:
1930 PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
1931 break;
1932 }
1933}
1934
1935int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
1936 u32 offset, u32 mask, u32 value)
1937{
1938 int ret = 0;
1939 struct msm_pcie_dev_t *pdev = NULL;
1940
1941 if (!dev) {
1942 pr_err("PCIe: the input pci dev is NULL.\n");
1943 return -ENODEV;
1944 }
1945
Tony Truongbad3b742017-11-22 14:40:19 -08001946 if (option == MSM_PCIE_READ_PCIE_REGISTER ||
1947 option == MSM_PCIE_WRITE_PCIE_REGISTER ||
1948 option == MSM_PCIE_DUMP_PCIE_REGISTER_SPACE) {
1949 if (!base || base >= MSM_PCIE_MAX_RES) {
Tony Truong349ee492014-10-01 17:35:56 -07001950 PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
1951 PCIE_DBG_FS(pdev,
1952 "PCIe: base_sel is still 0x%x\n", base_sel);
1953 return -EINVAL;
1954 }
1955
1956 base_sel = base;
1957 PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
1958
Tony Truongbad3b742017-11-22 14:40:19 -08001959 if (option == MSM_PCIE_READ_PCIE_REGISTER ||
1960 option == MSM_PCIE_WRITE_PCIE_REGISTER) {
Tony Truong349ee492014-10-01 17:35:56 -07001961 wr_offset = offset;
1962 wr_mask = mask;
1963 wr_value = value;
1964
1965 PCIE_DBG_FS(pdev,
1966 "PCIe: wr_offset is now 0x%x\n", wr_offset);
1967 PCIE_DBG_FS(pdev,
1968 "PCIe: wr_mask is now 0x%x\n", wr_mask);
1969 PCIE_DBG_FS(pdev,
1970 "PCIe: wr_value is now 0x%x\n", wr_value);
1971 }
1972 }
1973
1974 pdev = PCIE_BUS_PRIV_DATA(dev->bus);
Tony Truongbad3b742017-11-22 14:40:19 -08001975 rc_sel = BIT(pdev->rc_idx);
Tony Truong349ee492014-10-01 17:35:56 -07001976
1977 msm_pcie_sel_debug_testcase(pdev, option);
1978
1979 return ret;
1980}
1981EXPORT_SYMBOL(msm_pcie_debug_info);
1982
Tony Truongbd9a3412017-02-27 18:30:13 -08001983#ifdef CONFIG_SYSFS
1984static ssize_t msm_pcie_enumerate_store(struct device *dev,
1985 struct device_attribute *attr,
1986 const char *buf, size_t count)
1987{
1988 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
1989 dev_get_drvdata(dev);
1990
1991 if (pcie_dev)
1992 msm_pcie_enumerate(pcie_dev->rc_idx);
1993
1994 return count;
1995}
1996
/* Write-only (0200) sysfs attribute; writes invoke msm_pcie_enumerate_store(). */
static DEVICE_ATTR(enumerate, 0200, NULL, msm_pcie_enumerate_store);
1998
1999static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
2000{
2001 int ret;
2002
2003 ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
2004 if (ret)
2005 PCIE_DBG_FS(dev,
2006 "RC%d: failed to create sysfs enumerate node\n",
2007 dev->rc_idx);
2008}
2009
2010static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
2011{
2012 if (dev->pdev)
2013 device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
2014}
2015#else
/* Stub when CONFIG_SYSFS is disabled: no enumerate attribute is created. */
static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
{
}
2019
/* Stub when CONFIG_SYSFS is disabled: nothing to remove. */
static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
{
}
2023#endif
2024
Tony Truong349ee492014-10-01 17:35:56 -07002025#ifdef CONFIG_DEBUG_FS
/* debugfs directory ("pci-msm") and its control nodes. */
static struct dentry *dent_msm_pcie;
static struct dentry *dfile_rc_sel;
static struct dentry *dfile_case;
static struct dentry *dfile_base_sel;
static struct dentry *dfile_linkdown_panic;
static struct dentry *dfile_wr_offset;
static struct dentry *dfile_wr_mask;
static struct dentry *dfile_wr_value;
static struct dentry *dfile_boot_option;
static struct dentry *dfile_aer_enable;
static struct dentry *dfile_corr_counter_limit;

/* Bitmask of all valid RC indices: (1 << MAX_RC_NUM) - 1. */
static u32 rc_sel_max;
2039
Tony Truongbad3b742017-11-22 14:40:19 -08002040static int msm_pcie_debugfs_parse_input(const char __user *buf,
2041 size_t count, unsigned int *data)
2042{
2043 unsigned long ret;
2044 char *str, *str_temp;
2045
2046 str = kmalloc(count + 1, GFP_KERNEL);
2047 if (!str)
2048 return -ENOMEM;
2049
2050 ret = copy_from_user(str, buf, count);
2051 if (ret) {
2052 kfree(str);
2053 return -EFAULT;
2054 }
2055
2056 str[count] = 0;
2057 str_temp = str;
2058
2059 ret = get_option(&str_temp, data);
2060 kfree(str);
2061 if (ret != 1)
2062 return -EINVAL;
2063
2064 return 0;
2065}
2066
2067static int msm_pcie_debugfs_case_show(struct seq_file *m, void *v)
2068{
2069 int i;
2070
2071 for (i = 0; i < MSM_PCIE_MAX_DEBUGFS_OPTION; i++)
2072 seq_printf(m, "\t%d:\t %s\n", i,
2073 msm_pcie_debugfs_option_desc[i]);
2074
2075 return 0;
2076}
2077
/* Open hook for the "case" node: attach the single-record show routine. */
static int msm_pcie_debugfs_case_open(struct inode *inode, struct file *file)
{
	return single_open(file, msm_pcie_debugfs_case_show, NULL);
}
2082
2083static ssize_t msm_pcie_debugfs_case_select(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002084 const char __user *buf,
2085 size_t count, loff_t *ppos)
2086{
Tony Truongbad3b742017-11-22 14:40:19 -08002087 int i, ret;
Tony Truong349ee492014-10-01 17:35:56 -07002088 unsigned int testcase = 0;
Tony Truong349ee492014-10-01 17:35:56 -07002089
Tony Truongbad3b742017-11-22 14:40:19 -08002090 ret = msm_pcie_debugfs_parse_input(buf, count, &testcase);
Tony Truong349ee492014-10-01 17:35:56 -07002091 if (ret)
Tony Truongbad3b742017-11-22 14:40:19 -08002092 return ret;
Tony Truong349ee492014-10-01 17:35:56 -07002093
2094 pr_alert("PCIe: TEST: %d\n", testcase);
2095
2096 for (i = 0; i < MAX_RC_NUM; i++) {
Tony Truongbad3b742017-11-22 14:40:19 -08002097 if (rc_sel & BIT(i))
2098 msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
Tony Truong349ee492014-10-01 17:35:56 -07002099 }
2100
2101 return count;
2102}
2103
/* "case" node: read lists the testcases, write runs one. */
static const struct file_operations msm_pcie_debugfs_case_ops = {
	.open = msm_pcie_debugfs_case_open,
	.release = single_release,
	.read = seq_read,
	.write = msm_pcie_debugfs_case_select,
};
2110
Tony Truongbad3b742017-11-22 14:40:19 -08002111static ssize_t msm_pcie_debugfs_rc_select(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002112 const char __user *buf,
2113 size_t count, loff_t *ppos)
2114{
Tony Truongbad3b742017-11-22 14:40:19 -08002115 int i, ret;
Tony Truong349ee492014-10-01 17:35:56 -07002116 u32 new_rc_sel = 0;
2117
Tony Truongbad3b742017-11-22 14:40:19 -08002118 ret = msm_pcie_debugfs_parse_input(buf, count, &new_rc_sel);
Tony Truong349ee492014-10-01 17:35:56 -07002119 if (ret)
Tony Truongbad3b742017-11-22 14:40:19 -08002120 return ret;
Tony Truong349ee492014-10-01 17:35:56 -07002121
2122 if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
2123 pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
2124 pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
2125 } else {
2126 rc_sel = new_rc_sel;
2127 pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
2128 }
2129
2130 pr_alert("PCIe: the following RC(s) will be tested:\n");
Tony Truongbad3b742017-11-22 14:40:19 -08002131 for (i = 0; i < MAX_RC_NUM; i++)
2132 if (rc_sel & BIT(i))
Tony Truong349ee492014-10-01 17:35:56 -07002133 pr_alert("RC %d\n", i);
Tony Truong349ee492014-10-01 17:35:56 -07002134
2135 return count;
2136}
2137
/* "rc_sel" node: write-only selector of the target-RC bitmask. */
static const struct file_operations msm_pcie_debugfs_rc_select_ops = {
	.write = msm_pcie_debugfs_rc_select,
};
2141
Tony Truongbad3b742017-11-22 14:40:19 -08002142static ssize_t msm_pcie_debugfs_base_select(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002143 const char __user *buf,
2144 size_t count, loff_t *ppos)
2145{
Tony Truongbad3b742017-11-22 14:40:19 -08002146 int ret;
Tony Truong349ee492014-10-01 17:35:56 -07002147 u32 new_base_sel = 0;
Tony Truong349ee492014-10-01 17:35:56 -07002148
Tony Truongbad3b742017-11-22 14:40:19 -08002149 ret = msm_pcie_debugfs_parse_input(buf, count, &new_base_sel);
Tony Truong349ee492014-10-01 17:35:56 -07002150 if (ret)
Tony Truongbad3b742017-11-22 14:40:19 -08002151 return ret;
Tony Truong349ee492014-10-01 17:35:56 -07002152
Tony Truongbad3b742017-11-22 14:40:19 -08002153 if (!new_base_sel || new_base_sel > MSM_PCIE_MAX_RES) {
Tony Truong349ee492014-10-01 17:35:56 -07002154 pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
2155 new_base_sel);
2156 pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
2157 } else {
2158 base_sel = new_base_sel;
2159 pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
Tony Truongbad3b742017-11-22 14:40:19 -08002160 pr_alert("%s\n", msm_pcie_res_info[base_sel - 1].name);
Tony Truong349ee492014-10-01 17:35:56 -07002161 }
2162
Tony Truong349ee492014-10-01 17:35:56 -07002163 return count;
2164}
2165
/* "base_sel" node: write-only selector of the target register space. */
static const struct file_operations msm_pcie_debugfs_base_select_ops = {
	.write = msm_pcie_debugfs_base_select,
};
2169
Tony Truongbad3b742017-11-22 14:40:19 -08002170static ssize_t msm_pcie_debugfs_linkdown_panic(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002171 const char __user *buf,
2172 size_t count, loff_t *ppos)
2173{
Tony Truongbad3b742017-11-22 14:40:19 -08002174 int i, ret;
Tony Truong349ee492014-10-01 17:35:56 -07002175 u32 new_linkdown_panic = 0;
Tony Truong349ee492014-10-01 17:35:56 -07002176
Tony Truongbad3b742017-11-22 14:40:19 -08002177 ret = msm_pcie_debugfs_parse_input(buf, count, &new_linkdown_panic);
Tony Truong349ee492014-10-01 17:35:56 -07002178 if (ret)
Tony Truongbad3b742017-11-22 14:40:19 -08002179 return ret;
Tony Truong349ee492014-10-01 17:35:56 -07002180
Tony Truongbad3b742017-11-22 14:40:19 -08002181 new_linkdown_panic = !!new_linkdown_panic;
Tony Truong349ee492014-10-01 17:35:56 -07002182
Tony Truongbad3b742017-11-22 14:40:19 -08002183 for (i = 0; i < MAX_RC_NUM; i++) {
2184 if (rc_sel & BIT(i)) {
2185 msm_pcie_dev[i].linkdown_panic =
2186 new_linkdown_panic;
2187 PCIE_DBG_FS(&msm_pcie_dev[i],
2188 "PCIe: RC%d: linkdown_panic is now %d\n",
2189 i, msm_pcie_dev[i].linkdown_panic);
Tony Truong349ee492014-10-01 17:35:56 -07002190 }
Tony Truong349ee492014-10-01 17:35:56 -07002191 }
2192
2193 return count;
2194}
2195
/* "linkdown_panic" node: write-only toggle, applied to selected RCs. */
static const struct file_operations msm_pcie_debugfs_linkdown_panic_ops = {
	.write = msm_pcie_debugfs_linkdown_panic,
};
2199
Tony Truongbad3b742017-11-22 14:40:19 -08002200static ssize_t msm_pcie_debugfs_wr_offset(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002201 const char __user *buf,
2202 size_t count, loff_t *ppos)
2203{
Tony Truongbad3b742017-11-22 14:40:19 -08002204 int ret;
Tony Truong349ee492014-10-01 17:35:56 -07002205
2206 wr_offset = 0;
Tony Truongbad3b742017-11-22 14:40:19 -08002207
2208 ret = msm_pcie_debugfs_parse_input(buf, count, &wr_offset);
2209 if (ret)
2210 return ret;
Tony Truong349ee492014-10-01 17:35:56 -07002211
2212 pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
2213
2214 return count;
2215}
2216
/* "wr_offset" node: write-only register-offset setter. */
static const struct file_operations msm_pcie_debugfs_wr_offset_ops = {
	.write = msm_pcie_debugfs_wr_offset,
};
2220
Tony Truongbad3b742017-11-22 14:40:19 -08002221static ssize_t msm_pcie_debugfs_wr_mask(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002222 const char __user *buf,
2223 size_t count, loff_t *ppos)
2224{
Tony Truongbad3b742017-11-22 14:40:19 -08002225 int ret;
Tony Truong349ee492014-10-01 17:35:56 -07002226
2227 wr_mask = 0;
Tony Truongbad3b742017-11-22 14:40:19 -08002228
2229 ret = msm_pcie_debugfs_parse_input(buf, count, &wr_mask);
2230 if (ret)
2231 return ret;
Tony Truong349ee492014-10-01 17:35:56 -07002232
2233 pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
2234
2235 return count;
2236}
2237
/* "wr_mask" node: write-only register-mask setter. */
static const struct file_operations msm_pcie_debugfs_wr_mask_ops = {
	.write = msm_pcie_debugfs_wr_mask,
};
Tony Truongbad3b742017-11-22 14:40:19 -08002241static ssize_t msm_pcie_debugfs_wr_value(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002242 const char __user *buf,
2243 size_t count, loff_t *ppos)
2244{
Tony Truongbad3b742017-11-22 14:40:19 -08002245 int ret;
Tony Truong349ee492014-10-01 17:35:56 -07002246
2247 wr_value = 0;
Tony Truongbad3b742017-11-22 14:40:19 -08002248
2249 ret = msm_pcie_debugfs_parse_input(buf, count, &wr_value);
2250 if (ret)
2251 return ret;
Tony Truong349ee492014-10-01 17:35:56 -07002252
2253 pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
2254
2255 return count;
2256}
2257
/* "wr_value" node: write-only register-value setter. */
static const struct file_operations msm_pcie_debugfs_wr_value_ops = {
	.write = msm_pcie_debugfs_wr_value,
};
2261
Tony Truongbad3b742017-11-22 14:40:19 -08002262static ssize_t msm_pcie_debugfs_boot_option(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002263 const char __user *buf,
2264 size_t count, loff_t *ppos)
2265{
Tony Truongbad3b742017-11-22 14:40:19 -08002266 int i, ret;
Tony Truong9f2c7722017-02-28 15:02:27 -08002267 u32 new_boot_option = 0;
Tony Truong349ee492014-10-01 17:35:56 -07002268
Tony Truongbad3b742017-11-22 14:40:19 -08002269 ret = msm_pcie_debugfs_parse_input(buf, count, &new_boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002270 if (ret)
Tony Truongbad3b742017-11-22 14:40:19 -08002271 return ret;
Tony Truong349ee492014-10-01 17:35:56 -07002272
Tony Truongbad3b742017-11-22 14:40:19 -08002273 if (new_boot_option <= (BIT(0) | BIT(1))) {
Tony Truong349ee492014-10-01 17:35:56 -07002274 for (i = 0; i < MAX_RC_NUM; i++) {
Tony Truongbad3b742017-11-22 14:40:19 -08002275 if (rc_sel & BIT(i)) {
Tony Truong9f2c7722017-02-28 15:02:27 -08002276 msm_pcie_dev[i].boot_option = new_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002277 PCIE_DBG_FS(&msm_pcie_dev[i],
Tony Truong9f2c7722017-02-28 15:02:27 -08002278 "PCIe: RC%d: boot_option is now 0x%x\n",
2279 i, msm_pcie_dev[i].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002280 }
2281 }
2282 } else {
Tony Truong9f2c7722017-02-28 15:02:27 -08002283 pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
2284 new_boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002285 }
2286
2287 return count;
2288}
2289
/* "boot_option" node: write-only boot-option setter for selected RCs. */
static const struct file_operations msm_pcie_debugfs_boot_option_ops = {
	.write = msm_pcie_debugfs_boot_option,
};
2293
Tony Truongbad3b742017-11-22 14:40:19 -08002294static ssize_t msm_pcie_debugfs_aer_enable(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002295 const char __user *buf,
2296 size_t count, loff_t *ppos)
2297{
Tony Truongbad3b742017-11-22 14:40:19 -08002298 int i, ret;
Tony Truong349ee492014-10-01 17:35:56 -07002299 u32 new_aer_enable = 0;
Tony Truong349ee492014-10-01 17:35:56 -07002300
Tony Truongbad3b742017-11-22 14:40:19 -08002301 ret = msm_pcie_debugfs_parse_input(buf, count, &new_aer_enable);
Tony Truong349ee492014-10-01 17:35:56 -07002302 if (ret)
Tony Truongbad3b742017-11-22 14:40:19 -08002303 return ret;
Tony Truong349ee492014-10-01 17:35:56 -07002304
Tony Truongbad3b742017-11-22 14:40:19 -08002305 new_aer_enable = !!new_aer_enable;
Tony Truong349ee492014-10-01 17:35:56 -07002306
2307 for (i = 0; i < MAX_RC_NUM; i++) {
Tony Truongbad3b742017-11-22 14:40:19 -08002308 if (rc_sel & BIT(i)) {
Tony Truong349ee492014-10-01 17:35:56 -07002309 msm_pcie_dev[i].aer_enable = new_aer_enable;
2310 PCIE_DBG_FS(&msm_pcie_dev[i],
2311 "PCIe: RC%d: aer_enable is now %d\n",
2312 i, msm_pcie_dev[i].aer_enable);
2313
2314 msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
2315 PCIE20_BRIDGE_CTRL,
2316 new_aer_enable ? 0 : BIT(16),
2317 new_aer_enable ? BIT(16) : 0);
2318
2319 PCIE_DBG_FS(&msm_pcie_dev[i],
2320 "RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
2321 readl_relaxed(msm_pcie_dev[i].dm_core +
2322 PCIE20_BRIDGE_CTRL));
2323 }
2324 }
2325
2326 return count;
2327}
2328
/* "aer_enable" node: write-only AER toggle for selected RCs. */
static const struct file_operations msm_pcie_debugfs_aer_enable_ops = {
	.write = msm_pcie_debugfs_aer_enable,
};
2332
Tony Truongbad3b742017-11-22 14:40:19 -08002333static ssize_t msm_pcie_debugfs_corr_counter_limit(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002334 const char __user *buf,
2335 size_t count, loff_t *ppos)
2336{
Tony Truongbad3b742017-11-22 14:40:19 -08002337 int ret;
Tony Truong349ee492014-10-01 17:35:56 -07002338
2339 corr_counter_limit = 0;
Tony Truong349ee492014-10-01 17:35:56 -07002340
Tony Truongbad3b742017-11-22 14:40:19 -08002341 ret = msm_pcie_debugfs_parse_input(buf, count, &corr_counter_limit);
2342 if (ret)
2343 return ret;
2344
2345 pr_info("PCIe: corr_counter_limit is now %u\n", corr_counter_limit);
Tony Truong349ee492014-10-01 17:35:56 -07002346
2347 return count;
2348}
2349
/* "corr_counter_limit" node: write-only counter-limit setter. */
static const struct file_operations msm_pcie_debugfs_corr_counter_limit_ops = {
	.write = msm_pcie_debugfs_corr_counter_limit,
};
2353
2354static void msm_pcie_debugfs_init(void)
2355{
2356 rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
2357 wr_mask = 0xffffffff;
2358
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002359 dent_msm_pcie = debugfs_create_dir("pci-msm", NULL);
Tony Truong349ee492014-10-01 17:35:56 -07002360 if (IS_ERR(dent_msm_pcie)) {
2361 pr_err("PCIe: fail to create the folder for debug_fs.\n");
2362 return;
2363 }
2364
2365 dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002366 dent_msm_pcie, NULL,
Tony Truongbad3b742017-11-22 14:40:19 -08002367 &msm_pcie_debugfs_rc_select_ops);
Tony Truong349ee492014-10-01 17:35:56 -07002368 if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
2369 pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
2370 goto rc_sel_error;
2371 }
2372
2373 dfile_case = debugfs_create_file("case", 0664,
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002374 dent_msm_pcie, NULL,
Tony Truongbad3b742017-11-22 14:40:19 -08002375 &msm_pcie_debugfs_case_ops);
Tony Truong349ee492014-10-01 17:35:56 -07002376 if (!dfile_case || IS_ERR(dfile_case)) {
2377 pr_err("PCIe: fail to create the file for debug_fs case.\n");
2378 goto case_error;
2379 }
2380
2381 dfile_base_sel = debugfs_create_file("base_sel", 0664,
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002382 dent_msm_pcie, NULL,
Tony Truongbad3b742017-11-22 14:40:19 -08002383 &msm_pcie_debugfs_base_select_ops);
Tony Truong349ee492014-10-01 17:35:56 -07002384 if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
2385 pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
2386 goto base_sel_error;
2387 }
2388
2389 dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002390 dent_msm_pcie, NULL,
Tony Truongbad3b742017-11-22 14:40:19 -08002391 &msm_pcie_debugfs_linkdown_panic_ops);
Tony Truong349ee492014-10-01 17:35:56 -07002392 if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
2393 pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
2394 goto linkdown_panic_error;
2395 }
2396
2397 dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002398 dent_msm_pcie, NULL,
Tony Truongbad3b742017-11-22 14:40:19 -08002399 &msm_pcie_debugfs_wr_offset_ops);
Tony Truong349ee492014-10-01 17:35:56 -07002400 if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
2401 pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
2402 goto wr_offset_error;
2403 }
2404
2405 dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002406 dent_msm_pcie, NULL,
Tony Truongbad3b742017-11-22 14:40:19 -08002407 &msm_pcie_debugfs_wr_mask_ops);
Tony Truong349ee492014-10-01 17:35:56 -07002408 if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
2409 pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
2410 goto wr_mask_error;
2411 }
2412
2413 dfile_wr_value = debugfs_create_file("wr_value", 0664,
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002414 dent_msm_pcie, NULL,
Tony Truongbad3b742017-11-22 14:40:19 -08002415 &msm_pcie_debugfs_wr_value_ops);
Tony Truong349ee492014-10-01 17:35:56 -07002416 if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
2417 pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
2418 goto wr_value_error;
2419 }
2420
Tony Truong9f2c7722017-02-28 15:02:27 -08002421 dfile_boot_option = debugfs_create_file("boot_option", 0664,
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002422 dent_msm_pcie, NULL,
Tony Truongbad3b742017-11-22 14:40:19 -08002423 &msm_pcie_debugfs_boot_option_ops);
Tony Truong9f2c7722017-02-28 15:02:27 -08002424 if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
2425 pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
2426 goto boot_option_error;
Tony Truong349ee492014-10-01 17:35:56 -07002427 }
2428
2429 dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
Stephen Boydb5b8fc32017-06-21 08:59:11 -07002430 dent_msm_pcie, NULL,
Tony Truongbad3b742017-11-22 14:40:19 -08002431 &msm_pcie_debugfs_aer_enable_ops);
Tony Truong349ee492014-10-01 17:35:56 -07002432 if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
2433 pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
2434 goto aer_enable_error;
2435 }
2436
2437 dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
Tony Truongbad3b742017-11-22 14:40:19 -08002438 0664, dent_msm_pcie, NULL,
2439 &msm_pcie_debugfs_corr_counter_limit_ops);
Tony Truong349ee492014-10-01 17:35:56 -07002440 if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
2441 pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
2442 goto corr_counter_limit_error;
2443 }
2444 return;
2445
2446corr_counter_limit_error:
2447 debugfs_remove(dfile_aer_enable);
2448aer_enable_error:
Tony Truong9f2c7722017-02-28 15:02:27 -08002449 debugfs_remove(dfile_boot_option);
2450boot_option_error:
Tony Truong349ee492014-10-01 17:35:56 -07002451 debugfs_remove(dfile_wr_value);
2452wr_value_error:
2453 debugfs_remove(dfile_wr_mask);
2454wr_mask_error:
2455 debugfs_remove(dfile_wr_offset);
2456wr_offset_error:
2457 debugfs_remove(dfile_linkdown_panic);
2458linkdown_panic_error:
2459 debugfs_remove(dfile_base_sel);
2460base_sel_error:
2461 debugfs_remove(dfile_case);
2462case_error:
2463 debugfs_remove(dfile_rc_sel);
2464rc_sel_error:
2465 debugfs_remove(dent_msm_pcie);
2466}
2467
/*
 * Tear down all debugfs nodes created by msm_pcie_debugfs_init().
 * debugfs_remove() tolerates NULL/ERR_PTR arguments, so this is safe
 * even if init failed partway through.
 */
static void msm_pcie_debugfs_exit(void)
{
	debugfs_remove(dfile_rc_sel);
	debugfs_remove(dfile_case);
	debugfs_remove(dfile_base_sel);
	debugfs_remove(dfile_linkdown_panic);
	debugfs_remove(dfile_wr_offset);
	debugfs_remove(dfile_wr_mask);
	debugfs_remove(dfile_wr_value);
	debugfs_remove(dfile_boot_option);
	debugfs_remove(dfile_aer_enable);
	debugfs_remove(dfile_corr_counter_limit);
}
2481#else
/* Stub when CONFIG_DEBUG_FS is disabled: no debugfs nodes are created. */
static void msm_pcie_debugfs_init(void)
{
}
2485
/* Stub when CONFIG_DEBUG_FS is disabled: nothing to remove. */
static void msm_pcie_debugfs_exit(void)
{
}
2489#endif
2490
/*
 * Return nonzero when the PCIe link is up, by testing BIT(29) of
 * PCIE20_CAP_LINKCTRLSTATUS — presumably the Data Link Layer Link
 * Active bit of the Link Status register; confirm against the
 * register layout.
 */
static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
{
	return readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
}
2496
2497/**
2498 * msm_pcie_iatu_config - configure outbound address translation region
2499 * @dev: root commpex
2500 * @nr: region number
2501 * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
2502 * @host_addr: - region start address on host
2503 * @host_end: - region end address (low 32 bit) on host,
2504 * upper 32 bits are same as for @host_addr
2505 * @target_addr: - region start address on target
2506 */
static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
				unsigned long host_addr, u32 host_end,
				unsigned long target_addr)
{
	/*
	 * Program outbound iATU region @nr so that AXI accesses in
	 * [host_addr, host_end] are emitted as TLPs of @type addressed to
	 * @target_addr.
	 *
	 * Newer controllers expose the iATU registers in a dedicated block
	 * (dev->iatu, indexed per region); legacy controllers program one
	 * shared register set selected through the viewport register in the
	 * core space (dev->dm_core).
	 */
	void __iomem *iatu_base = dev->iatu ? dev->iatu : dev->dm_core;

	u32 iatu_viewport_offset;
	u32 iatu_ctrl1_offset;
	u32 iatu_ctrl2_offset;
	u32 iatu_lbar_offset;
	u32 iatu_ubar_offset;
	u32 iatu_lar_offset;
	u32 iatu_ltar_offset;
	u32 iatu_utar_offset;

	if (dev->iatu) {
		/* direct per-region registers: no viewport select needed */
		iatu_viewport_offset = 0;
		iatu_ctrl1_offset = PCIE_IATU_CTRL1(nr);
		iatu_ctrl2_offset = PCIE_IATU_CTRL2(nr);
		iatu_lbar_offset = PCIE_IATU_LBAR(nr);
		iatu_ubar_offset = PCIE_IATU_UBAR(nr);
		iatu_lar_offset = PCIE_IATU_LAR(nr);
		iatu_ltar_offset = PCIE_IATU_LTAR(nr);
		iatu_utar_offset = PCIE_IATU_UTAR(nr);
	} else {
		iatu_viewport_offset = PCIE20_PLR_IATU_VIEWPORT;
		iatu_ctrl1_offset = PCIE20_PLR_IATU_CTRL1;
		iatu_ctrl2_offset = PCIE20_PLR_IATU_CTRL2;
		iatu_lbar_offset = PCIE20_PLR_IATU_LBAR;
		iatu_ubar_offset = PCIE20_PLR_IATU_UBAR;
		iatu_lar_offset = PCIE20_PLR_IATU_LAR;
		iatu_ltar_offset = PCIE20_PLR_IATU_LTAR;
		iatu_utar_offset = PCIE20_PLR_IATU_UTAR;
	}

	/* mirror programmed values in the RC shadow (viewport variant only)
	 * so they can be restored after a link-down/resume
	 */
	if (dev->shadow_en && iatu_viewport_offset) {
		dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
			nr;
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
			type;
		dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
			lower_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
			upper_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
			host_end;
		dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
			lower_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
			upper_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
			BIT(31);
	}

	/* select region */
	if (iatu_viewport_offset) {
		writel_relaxed(nr, iatu_base + iatu_viewport_offset);
		/* ensure that hardware locks it */
		wmb();
	}

	/* switch off region before changing it */
	writel_relaxed(0, iatu_base + iatu_ctrl2_offset);
	/* and wait till it propagates to the hardware */
	wmb();

	writel_relaxed(type, iatu_base + iatu_ctrl1_offset);
	writel_relaxed(lower_32_bits(host_addr),
		       iatu_base + iatu_lbar_offset);
	writel_relaxed(upper_32_bits(host_addr),
		       iatu_base + iatu_ubar_offset);
	writel_relaxed(host_end, iatu_base + iatu_lar_offset);
	writel_relaxed(lower_32_bits(target_addr),
		       iatu_base + iatu_ltar_offset);
	writel_relaxed(upper_32_bits(target_addr),
		       iatu_base + iatu_utar_offset);
	/* ensure that changes propagated to the hardware */
	wmb();
	/* BIT(31) in CTRL2 re-enables the region */
	writel_relaxed(BIT(31), iatu_base + iatu_ctrl2_offset);

	/* ensure that changes propagated to the hardware */
	wmb();

	if (dev->enumerated) {
		PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
			dev->pcidev_table[nr].bdf >> 24,
			dev->pcidev_table[nr].bdf >> 19 & 0x1f,
			dev->pcidev_table[nr].bdf >> 16 & 0x07);
		if (iatu_viewport_offset)
			PCIE_DBG2(dev, "IATU_VIEWPORT:0x%x\n",
				readl_relaxed(dev->dm_core +
					PCIE20_PLR_IATU_VIEWPORT));
		PCIE_DBG2(dev, "IATU_CTRL1:0x%x\n",
			readl_relaxed(iatu_base + iatu_ctrl1_offset));
		PCIE_DBG2(dev, "IATU_LBAR:0x%x\n",
			readl_relaxed(iatu_base + iatu_lbar_offset));
		PCIE_DBG2(dev, "IATU_UBAR:0x%x\n",
			readl_relaxed(iatu_base + iatu_ubar_offset));
		PCIE_DBG2(dev, "IATU_LAR:0x%x\n",
			readl_relaxed(iatu_base + iatu_lar_offset));
		PCIE_DBG2(dev, "IATU_LTAR:0x%x\n",
			readl_relaxed(iatu_base + iatu_ltar_offset));
		PCIE_DBG2(dev, "IATU_UTAR:0x%x\n",
			readl_relaxed(iatu_base + iatu_utar_offset));
		PCIE_DBG2(dev, "IATU_CTRL2:0x%x\n\n",
			readl_relaxed(iatu_base + iatu_ctrl2_offset));
	}
}
2615
2616/**
2617 * msm_pcie_cfg_bdf - configure for config access
 * @dev: root complex
2619 * @bus: PCI bus number
2620 * @devfn: PCI dev and function number
2621 *
2622 * Remap if required region 0 for config access of proper type
2623 * (CFG0 for bus 1, CFG1 for other buses)
2624 * Cache current device bdf for speed-up
2625 */
2626static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
2627{
2628 struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
2629 u32 bdf = BDF_OFFSET(bus, devfn);
2630 u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
2631
2632 if (dev->current_bdf == bdf)
2633 return;
2634
2635 msm_pcie_iatu_config(dev, 0, type,
2636 axi_conf->start,
2637 axi_conf->start + SZ_4K - 1,
2638 bdf);
2639
2640 dev->current_bdf = bdf;
2641}
2642
2643static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
2644 u32 word_offset, u32 wr_val,
2645 u32 bdf, bool rc)
2646{
2647 int i, j;
2648 u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;
2649
2650 if (rc) {
2651 dev->rc_shadow[word_offset / 4] = wr_val;
2652 } else {
2653 for (i = 0; i < MAX_DEVICE_NUM; i++) {
2654 if (!dev->pcidev_table[i].bdf) {
2655 for (j = 0; j < max_dev; j++)
2656 if (!msm_pcie_dev_tbl[j].bdf) {
2657 msm_pcie_dev_tbl[j].bdf = bdf;
2658 break;
2659 }
2660 dev->pcidev_table[i].bdf = bdf;
2661 if ((!dev->bridge_found) && (i > 0))
2662 dev->bridge_found = true;
2663 }
2664 if (dev->pcidev_table[i].bdf == bdf) {
2665 dev->ep_shadow[i][word_offset / 4] = wr_val;
2666 break;
2667 }
2668 }
2669 }
2670}
2671
static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
					int where, int size, u32 *val)
{
	/*
	 * Common backend for config-space read (oper == RD) and write
	 * (oper == WR) of 1/2/4 bytes at @where. Returns PCIBIOS_* codes;
	 * on any failure *val is set to all-ones, matching PCI convention.
	 * All register access happens under dev->cfg_lock.
	 */
	uint32_t word_offset, byte_offset, mask;
	uint32_t rd_val, wr_val;
	struct msm_pcie_dev_t *dev;
	void __iomem *config_base;
	bool rc = false;
	u32 rc_idx;
	int rv = 0;
	u32 bdf = BDF_OFFSET(bus->number, devfn);
	int i;

	dev = PCIE_BUS_PRIV_DATA(bus);

	if (!dev) {
		pr_err("PCIe: No device found for this bus.\n");
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto out;
	}

	rc_idx = dev->rc_idx;
	/* bus 0 addresses the root complex itself */
	rc = (bus->number == 0);

	spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);

	/* config access may be gated off (e.g. around suspend) */
	if (!dev->cfg_access) {
		PCIE_DBG3(dev,
			"Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* the RC exposes only function 0 */
	if (rc && (devfn != 0)) {
		PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
			 (oper == RD) ? "rd" : "wr", bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG3(dev,
			"Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* check if the link is up for endpoint */
	if (!rc && !msm_pcie_is_link_up(dev)) {
		PCIE_ERR(dev,
			"PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
				rc_idx, (oper == RD) ? "rd" : "wr",
				bus->number, devfn);
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
	}

	/* before enumeration, retarget iATU region 0 at this BDF */
	if (!rc && !dev->enumerated)
		msm_pcie_cfg_bdf(dev, bus->number, devfn);

	/* split the access into an aligned dword plus a byte lane mask */
	word_offset = where & ~0x3;
	byte_offset = where & 0x3;
	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);

	if (rc || !dev->enumerated) {
		config_base = rc ? dev->dm_core : dev->conf;
	} else {
		/* after enumeration each EP has its own mapped window */
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (dev->pcidev_table[i].bdf == bdf) {
				config_base = dev->pcidev_table[i].conf_base;
				break;
			}
		}
		if (i == MAX_DEVICE_NUM) {
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
		}
	}

	rd_val = readl_relaxed(config_base + word_offset);

	if (oper == RD) {
		*val = ((rd_val & mask) >> (8 * byte_offset));
		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
			rc_idx, bus->number, devfn, where, size, *val, rd_val);
	} else {
		/* read-modify-write of the affected byte lanes only */
		wr_val = (rd_val & ~mask) |
			((*val << (8 * byte_offset)) & mask);

		/* NOTE(review): for RC dword 0x3c this forces bits 16/17
		 * (low bits of the bridge-control byte) — presumably parity
		 * error response + SERR enable; confirm against the RC's
		 * config layout.
		 */
		if ((bus->number == 0) && (where == 0x3c))
			wr_val = wr_val | (3 << 16);

		writel_relaxed(wr_val, config_base + word_offset);
		wmb(); /* ensure config data is written to hardware register */

		if (dev->shadow_en) {
			/* all-FFs readback suggests the link dropped; don't
			 * poison the shadow with it
			 */
			if (rd_val == PCIE_LINK_DOWN &&
				(readl_relaxed(config_base) == PCIE_LINK_DOWN))
				PCIE_ERR(dev,
					"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
					rc_idx, bus->number, devfn,
					where, size);
			else
				msm_pcie_save_shadow(dev, word_offset, wr_val,
					bdf, rc);
		}

		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
			rc_idx, bus->number, devfn, where, size,
			wr_val, rd_val, *val);
	}

unlock:
	spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
out:
	return rv;
}
2799
2800static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
2801 int size, u32 *val)
2802{
2803 int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
2804
2805 if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
2806 *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
2807 PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
2808 }
2809
2810 return ret;
2811}
2812
2813static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2814 int where, int size, u32 val)
2815{
2816 return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
2817}
2818
/* Config-space accessors handed to the PCI core for this root complex. */
static struct pci_ops msm_pcie_ops = {
	.read = msm_pcie_rd_conf,
	.write = msm_pcie_wr_conf,
};
2823
2824static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
2825{
2826 int rc = 0, i;
2827 struct msm_pcie_gpio_info_t *info;
2828
2829 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
2830
2831 for (i = 0; i < dev->gpio_n; i++) {
2832 info = &dev->gpio[i];
2833
2834 if (!info->num)
2835 continue;
2836
2837 rc = gpio_request(info->num, info->name);
2838 if (rc) {
2839 PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
2840 dev->rc_idx, info->name, rc);
2841 break;
2842 }
2843
2844 if (info->out)
2845 rc = gpio_direction_output(info->num, info->init);
2846 else
2847 rc = gpio_direction_input(info->num);
2848 if (rc) {
2849 PCIE_ERR(dev,
2850 "PCIe: RC%d can't set direction for GPIO %s:%d\n",
2851 dev->rc_idx, info->name, rc);
2852 gpio_free(info->num);
2853 break;
2854 }
2855 }
2856
2857 if (rc)
2858 while (i--)
2859 gpio_free(dev->gpio[i].num);
2860
2861 return rc;
2862}
2863
2864static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
2865{
2866 int i;
2867
2868 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
2869
2870 for (i = 0; i < dev->gpio_n; i++)
2871 gpio_free(dev->gpio[i].num);
2872}
2873
static int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
{
	/*
	 * Bring up every regulator of the RC in table order: program the
	 * voltage window and load (when DT provided them), then enable.
	 * On failure, regulators enabled so far are disabled in reverse,
	 * and the CX rail's voltage vote is dropped to its OFF corner.
	 */
	int i, rc = 0;
	struct regulator *vreg;
	struct msm_pcie_vreg_info_t *info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		info = &dev->vreg[i];
		vreg = info->hdl;

		/* optional supplies that weren't found have a NULL handle */
		if (!vreg)
			continue;

		PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
			dev->rc_idx, info->name);
		if (info->max_v) {
			rc = regulator_set_voltage(vreg,
						info->min_v, info->max_v);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set voltage for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		if (info->opt_mode) {
			rc = regulator_set_load(vreg, info->opt_mode);
			if (rc < 0) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set mode for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		rc = regulator_enable(vreg);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe: RC%d can't enable regulator %s: %d\n",
				dev->rc_idx, info->name, rc);
			break;
		}
	}

	/* unwind: the failing regulator itself was never enabled */
	if (rc)
		while (i--) {
			struct regulator *hdl = dev->vreg[i].hdl;

			if (hdl) {
				regulator_disable(hdl);
				if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
					PCIE_DBG(dev,
						"RC%d: Removing %s vote.\n",
						dev->rc_idx,
						dev->vreg[i].name);
					regulator_set_voltage(hdl,
						RPMH_REGULATOR_LEVEL_OFF,
						RPMH_REGULATOR_LEVEL_MAX);
				}
			}

		}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
2944
2945static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
2946{
2947 int i;
2948
2949 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
2950
2951 for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
2952 if (dev->vreg[i].hdl) {
2953 PCIE_DBG(dev, "Vreg %s is being disabled\n",
2954 dev->vreg[i].name);
2955 regulator_disable(dev->vreg[i].hdl);
2956
2957 if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
2958 PCIE_DBG(dev,
2959 "RC%d: Removing %s vote.\n",
2960 dev->rc_idx,
2961 dev->vreg[i].name);
2962 regulator_set_voltage(dev->vreg[i].hdl,
Tony Truongb213ac12017-04-05 15:21:20 -07002963 RPMH_REGULATOR_LEVEL_OFF,
2964 RPMH_REGULATOR_LEVEL_MAX);
Tony Truong349ee492014-10-01 17:35:56 -07002965 }
2966 }
2967 }
2968
2969 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
2970}
2971
static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
{
	/*
	 * Power-on sequence for the controller's core clocks:
	 * GDSC(s) -> bus-bandwidth vote -> per-clock rate + enable ->
	 * assert/deassert every reset in the table.
	 */
	int i, rc = 0;
	struct msm_pcie_clk_info_t *info;
	struct msm_pcie_reset_info_t *reset_info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	rc = regulator_enable(dev->gdsc);

	if (rc) {
		PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
			dev->rc_idx, dev->pdev->name);
		return rc;
	}

	/* SMMU GDSC is optional (NULL when absent) */
	if (dev->gdsc_smmu) {
		rc = regulator_enable(dev->gdsc_smmu);

		if (rc) {
			PCIE_ERR(dev,
				"PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
				dev->rc_idx, dev->pdev->name);
			return rc;
		}
	}

	PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
	if (dev->bus_client) {
		rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe: fail to set bus bandwidth for RC%d:%d.\n",
				dev->rc_idx, rc);
			return rc;
		}

		PCIE_DBG2(dev,
			"PCIe: set bus bandwidth for RC%d.\n",
			dev->rc_idx);
	}

	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
		info = &dev->clk[i];

		/* optional clocks that weren't found have a NULL handle */
		if (!info->hdl)
			continue;

		if (info->config_mem)
			msm_pcie_config_clock_mem(dev, info);

		if (info->freq) {
			rc = clk_set_rate(info->hdl, info->freq);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set rate for clk %s: %d.\n",
					dev->rc_idx, info->name, rc);
				break;
			}

			PCIE_DBG2(dev,
				"PCIe: RC%d set rate for clk %s.\n",
				dev->rc_idx, info->name);
		}

		rc = clk_prepare_enable(info->hdl);

		if (rc)
			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
				dev->rc_idx, info->name);
		else
			PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
				info->name, dev->rc_idx);
	}

	/* unwind clocks/GDSCs on failure; execution still falls through
	 * to the reset sequence below
	 */
	if (rc) {
		PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
			dev->rc_idx);
		while (i--) {
			struct clk *hdl = dev->clk[i].hdl;

			if (hdl)
				clk_disable_unprepare(hdl);
		}

		if (dev->gdsc_smmu)
			regulator_disable(dev->gdsc_smmu);

		regulator_disable(dev->gdsc);
	}

	/* NOTE(review): a clock failure recorded in rc can be overwritten
	 * by the reset_control_* return values below, so the function may
	 * report success after a failed clock bring-up — confirm intent.
	 */
	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
		reset_info = &dev->reset[i];
		if (reset_info->hdl) {
			rc = reset_control_assert(reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to assert reset for %s.\n",
					dev->rc_idx, reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully asserted reset for %s.\n",
					dev->rc_idx, reset_info->name);

			/* add a 1ms delay to ensure the reset is asserted */
			usleep_range(1000, 1005);

			rc = reset_control_deassert(reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to deassert reset for %s.\n",
					dev->rc_idx, reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully deasserted reset for %s.\n",
					dev->rc_idx, reset_info->name);
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
3095
3096static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
3097{
3098 int i;
3099 int rc;
3100
3101 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3102
3103 for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
3104 if (dev->clk[i].hdl)
3105 clk_disable_unprepare(dev->clk[i].hdl);
3106
3107 if (dev->bus_client) {
3108 PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
3109 dev->rc_idx);
3110
3111 rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
3112 if (rc)
3113 PCIE_ERR(dev,
3114 "PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
3115 dev->rc_idx, rc);
3116 else
3117 PCIE_DBG(dev,
3118 "PCIe: relinquish bus bandwidth for RC%d.\n",
3119 dev->rc_idx);
3120 }
3121
3122 if (dev->gdsc_smmu)
3123 regulator_disable(dev->gdsc_smmu);
3124
3125 regulator_disable(dev->gdsc);
3126
3127 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3128}
3129
static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
{
	/*
	 * Enable the PHY pipe clocks (rate + enable per table entry), then
	 * pulse every pipe reset. Mirrors msm_pcie_clk_init() for the pipe
	 * domain.
	 */
	int i, rc = 0;
	struct msm_pcie_clk_info_t *info;
	struct msm_pcie_reset_info_t *pipe_reset_info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		info = &dev->pipeclk[i];

		/* optional clocks that weren't found have a NULL handle */
		if (!info->hdl)
			continue;


		if (info->config_mem)
			msm_pcie_config_clock_mem(dev, info);

		if (info->freq) {
			rc = clk_set_rate(info->hdl, info->freq);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set rate for clk %s: %d.\n",
					dev->rc_idx, info->name, rc);
				break;
			}

			PCIE_DBG2(dev,
				"PCIe: RC%d set rate for clk %s: %d.\n",
				dev->rc_idx, info->name, rc);
		}

		rc = clk_prepare_enable(info->hdl);

		if (rc)
			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
				dev->rc_idx, info->name);
		else
			PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
				dev->rc_idx, info->name);
	}

	/* unwind on failure; the reset sequence below still runs */
	if (rc) {
		PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
			dev->rc_idx);
		while (i--)
			if (dev->pipeclk[i].hdl)
				clk_disable_unprepare(dev->pipeclk[i].hdl);
	}

	/* NOTE(review): as in msm_pcie_clk_init(), rc from a clock failure
	 * may be overwritten by the reset_control_* calls below.
	 */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];
		if (pipe_reset_info->hdl) {
			rc = reset_control_assert(pipe_reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to assert pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully asserted pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);

			/* add a 1ms delay to ensure the reset is asserted */
			usleep_range(1000, 1005);

			rc = reset_control_deassert(
					pipe_reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to deassert pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully deasserted pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
3213
3214static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
3215{
3216 int i;
3217
3218 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
3219
3220 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
3221 if (dev->pipeclk[i].hdl)
3222 clk_disable_unprepare(
3223 dev->pipeclk[i].hdl);
3224
3225 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
3226}
3227
3228static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
3229{
3230 int i;
3231 u8 type;
3232 struct msm_pcie_device_info *dev_table = dev->pcidev_table;
3233
3234 for (i = 0; i < MAX_DEVICE_NUM; i++) {
3235 if (!dev_table[i].bdf)
3236 break;
3237
3238 type = dev_table[i].bdf >> 24 == 0x1 ?
3239 PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
3240
3241 msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
3242 dev_table[i].phy_address + SZ_4K - 1,
3243 dev_table[i].bdf);
3244 }
3245}
3246
static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
{
	/*
	 * Post-link-up controller configuration: iATU regions, N_FTS,
	 * AUX clock frequency, completion timeout and AER enablement.
	 */
	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
	 * axi config address range to device config address range
	 */
	if (dev->enumerated) {
		msm_pcie_iatu_config_all_ep(dev);
	} else {
		dev->current_bdf = 0; /* to force IATU re-config */
		msm_pcie_cfg_bdf(dev, 1, 0);
	}

	/* configure N_FTS */
	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
	if (!dev->n_fts)
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
					0, BIT(15));
	else
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
					PCIE20_ACK_N_FTS,
					dev->n_fts << 8);

	/* keep the shadow in sync with the freshly programmed value */
	if (dev->shadow_en)
		dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
			readl_relaxed(dev->dm_core +
			PCIE20_ACK_F_ASPM_CTRL_REG);

	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));

	/* configure AUX clock frequency register for PCIe core */
	if (dev->use_19p2mhz_aux_clk)
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
	else
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);

	/* configure the completion timeout value for PCIe core */
	if (dev->cpl_timeout && dev->bridge_found)
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0xf, dev->cpl_timeout);

	/* Enable AER on RC */
	if (dev->aer_enable) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
					BIT(16)|BIT(17));
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
					BIT(3)|BIT(2)|BIT(1)|BIT(0));

		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
	}
}
3305
Stephen Boydb5b8fc32017-06-21 08:59:11 -07003306static void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
Tony Truong349ee492014-10-01 17:35:56 -07003307{
3308 int i;
3309
3310 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3311
3312 /* program MSI controller and enable all interrupts */
3313 writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
3314 writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
3315
3316 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
3317 writel_relaxed(~0, dev->dm_core +
3318 PCIE20_MSI_CTRL_INTR_EN + (i * 12));
3319
3320 /* ensure that hardware is configured before proceeding */
3321 wmb();
3322}
3323
static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
					struct platform_device *pdev)
{
	/*
	 * Acquire everything the RC needs from DT/platform data:
	 * regulators (+ optional voltage/mode levels), GDSCs, GPIOs,
	 * PHY init sequence, clocks (+ optional max frequencies),
	 * resets, bus-scale client, memory-mapped register regions and
	 * IRQ/MSI numbers. Returns 0 or a negative errno; on error,
	 * devm-managed resources are released by the driver core.
	 */
	int i, len, cnt, ret = 0, size = 0;
	struct msm_pcie_vreg_info_t *vreg_info;
	struct msm_pcie_gpio_info_t *gpio_info;
	struct msm_pcie_clk_info_t *clk_info;
	struct resource *res;
	struct msm_pcie_res_info_t *res_info;
	struct msm_pcie_irq_info_t *irq_info;
	struct msm_pcie_irq_info_t *msi_info;
	struct msm_pcie_reset_info_t *reset_info;
	struct msm_pcie_reset_info_t *pipe_reset_info;
	char prop_name[MAX_PROP_SIZE];
	const __be32 *prop;
	u32 *clkfreq = NULL;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	/* optional table of per-clock maximum frequencies */
	cnt = of_property_count_elems_of_size((&pdev->dev)->of_node,
			"max-clock-frequency-hz", sizeof(u32));
	if (cnt > 0) {
		clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
					sizeof(*clkfreq), GFP_KERNEL);
		if (!clkfreq) {
			PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
					dev->rc_idx);
			return -ENOMEM;
		}
		ret = of_property_read_u32_array(
			(&pdev->dev)->of_node,
			"max-clock-frequency-hz", clkfreq, cnt);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
				dev->rc_idx, ret);
			goto out;
		}
	}

	/* regulators: required ones fail the probe, optional ones are
	 * left with a NULL handle; voltage/mode come from a
	 * "qcom,<name>-voltage-level" triple <max min mode>
	 */
	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		vreg_info = &dev->vreg[i];
		vreg_info->hdl =
				devm_regulator_get(&pdev->dev, vreg_info->name);

		if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
			PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
				vreg_info->name);
			ret = PTR_ERR(vreg_info->hdl);
			goto out;
		}

		if (IS_ERR(vreg_info->hdl)) {
			if (vreg_info->required) {
				PCIE_DBG(dev, "Vreg %s doesn't exist\n",
					vreg_info->name);
				ret = PTR_ERR(vreg_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Optional Vreg %s doesn't exist\n",
					vreg_info->name);
				vreg_info->hdl = NULL;
			}
		} else {
			dev->vreg_n++;
			snprintf(prop_name, MAX_PROP_SIZE,
				"qcom,%s-voltage-level", vreg_info->name);
			prop = of_get_property((&pdev->dev)->of_node,
						prop_name, &len);
			if (!prop || (len != (3 * sizeof(__be32)))) {
				PCIE_DBG(dev, "%s %s property\n",
					prop ? "invalid format" :
					"no", prop_name);
			} else {
				vreg_info->max_v = be32_to_cpup(&prop[0]);
				vreg_info->min_v = be32_to_cpup(&prop[1]);
				vreg_info->opt_mode =
					be32_to_cpup(&prop[2]);
			}
		}
	}

	dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");

	if (IS_ERR(dev->gdsc)) {
		PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
			dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
		if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
			PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
					dev->pdev->name);
		ret = PTR_ERR(dev->gdsc);
		goto out;
	}

	/* SMMU GDSC is optional */
	dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");

	if (IS_ERR(dev->gdsc_smmu)) {
		PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
			dev->rc_idx);
		dev->gdsc_smmu = NULL;
	}

	/* GPIOs: missing required ones abort with the of_get_named_gpio
	 * error still held in ret
	 */
	dev->gpio_n = 0;
	for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
		gpio_info = &dev->gpio[i];
		ret = of_get_named_gpio((&pdev->dev)->of_node,
					gpio_info->name, 0);
		if (ret >= 0) {
			gpio_info->num = ret;
			dev->gpio_n++;
			PCIE_DBG(dev, "GPIO num for %s is %d\n",
				gpio_info->name, gpio_info->num);
		} else {
			if (gpio_info->required) {
				PCIE_ERR(dev,
					"Could not get required GPIO %s\n",
					gpio_info->name);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Could not get optional GPIO %s\n",
					gpio_info->name);
			}
		}
		ret = 0;
	}

	/* optional PHY init sequence: array of register/value entries */
	of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
	if (size) {
		dev->phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->phy_sequence) {
			dev->phy_len =
				size / sizeof(*dev->phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,phy-sequence",
				(unsigned int *)dev->phy_sequence,
				size / sizeof(dev->phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* core clocks; frequencies follow the pipe clocks in clkfreq[] */
	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
		clk_info = &dev->clk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i +
					MSM_PCIE_MAX_PIPE_CLK];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* pipe clocks; their frequencies are first in clkfreq[] */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		clk_info = &dev->pipeclk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* core resets */
	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
		reset_info = &dev->reset[i];

		reset_info->hdl = devm_reset_control_get(&pdev->dev,
						reset_info->name);

		if (IS_ERR(reset_info->hdl)) {
			if (reset_info->required) {
				PCIE_DBG(dev,
					"Reset %s isn't available:%ld\n",
					reset_info->name,
					PTR_ERR(reset_info->hdl));

				ret = PTR_ERR(reset_info->hdl);
				reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Reset %s\n",
					reset_info->name);
				reset_info->hdl = NULL;
			}
		}
	}

	/* pipe resets */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];

		pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
						pipe_reset_info->name);

		if (IS_ERR(pipe_reset_info->hdl)) {
			if (pipe_reset_info->required) {
				PCIE_DBG(dev,
					"Pipe Reset %s isn't available:%ld\n",
					pipe_reset_info->name,
					PTR_ERR(pipe_reset_info->hdl));

				ret = PTR_ERR(pipe_reset_info->hdl);
				pipe_reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
					pipe_reset_info->name);
				pipe_reset_info->hdl = NULL;
			}
		}
	}

	/* bus-bandwidth voting client (optional) */
	dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (!dev->bus_scale_table) {
		PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
			dev->rc_idx, dev->pdev->name);
		dev->bus_client = 0;
	} else {
		dev->bus_client =
			msm_bus_scale_register_client(dev->bus_scale_table);
		if (!dev->bus_client) {
			PCIE_ERR(dev,
				"PCIe: Failed to register bus client for RC%d (%s)\n",
				dev->rc_idx, dev->pdev->name);
			msm_bus_cl_clear_pdata(dev->bus_scale_table);
			ret = -ENODEV;
			goto out;
		}
	}

	/* memory-mapped register regions */
	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
		res_info = &dev->res[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							res_info->name);

		if (!res) {
			PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
				dev->rc_idx, res_info->name);
		} else {
			PCIE_DBG(dev, "start addr for %s is %pa.\n",
				res_info->name,	&res->start);

			res_info->base = devm_ioremap(&pdev->dev,
						res->start, resource_size(res));
			if (!res_info->base) {
				PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
					dev->rc_idx, res_info->name);
				ret = -ENOMEM;
				goto out;
			} else {
				res_info->resource = res;
			}
		}
	}

	/* interrupt numbers (optional per entry) */
	for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
		irq_info = &dev->irq[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							irq_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, irq_info->name);
		} else {
			irq_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
					irq_info->num);
		}
	}

	/* MSI interrupt numbers (optional per entry) */
	for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
		msi_info = &dev->msi[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							msi_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, msi_info->name);
		} else {
			msi_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
					msi_info->num);
		}
	}

	/* All allocations succeeded */

	if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
		dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
	else
		dev->wake_n = 0;

	/* cache the commonly used mapped bases and resources */
	dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
	dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
	dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
	dev->iatu = dev->res[MSM_PCIE_RES_IATU].base;
	dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
	dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
	dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
	dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
	dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
	dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
	dev->dev_io_res->flags = IORESOURCE_IO;

out:
	kfree(clkfreq);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
3678
3679static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
3680{
3681 dev->parf = NULL;
3682 dev->elbi = NULL;
Tony Truongf49801f2017-10-25 11:22:35 -07003683 dev->iatu = NULL;
Tony Truong349ee492014-10-01 17:35:56 -07003684 dev->dm_core = NULL;
3685 dev->conf = NULL;
3686 dev->bars = NULL;
3687 dev->tcsr = NULL;
3688 dev->dev_mem_res = NULL;
3689 dev->dev_io_res = NULL;
3690}
3691
/*
 * msm_pcie_setup_gen3() - apply Gen3-specific controller configuration
 * before link training.
 *
 * Programs a sequence of Gen3 equalization/control registers in the DBI
 * (dm_core) space and finally sets the target link speed field of
 * Link Control 2 to GEN3_SPEED.  Register write order follows the
 * hardware programming sequence; do not reorder.
 */
static void msm_pcie_setup_gen3(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "PCIe: RC%d: Setting up Gen3\n", dev->rc_idx);

	/* set bit 0 within the 0x1ff00 field of the L0 driver control reg */
	msm_pcie_write_reg_field(dev->dm_core,
		PCIE_GEN3_L0_DRVR_CTRL0, 0x1ff00, BIT(0));

	/* NOTE(review): magic equalization values — from HW programming
	 * guide, presumably; confirm against the controller HPG.
	 */
	msm_pcie_write_reg(dev->dm_core,
		PCIE_GEN3_L0_BIST_ERR_CNT2_STATUS,
		(0x05 << 14) | (0x05 << 10) | (0x0d << 5));

	msm_pcie_write_mask(dev->dm_core +
		PCIE_GEN3_L0_BIST_ERR_CNT1_STATUS, BIT(4), 0);

	msm_pcie_write_mask(dev->dm_core +
		PCIE_GEN3_L0_RESET_GEN, BIT(0), 0);

	/* configure PCIe preset */
	msm_pcie_write_reg(dev->dm_core,
		PCIE_GEN3_L0_DEBUG_BUS_STATUS4, 1);
	msm_pcie_write_reg(dev->dm_core,
		PCIE_GEN3_COM_INTEGLOOP_GAIN1_MODE0, 0x77777777);
	msm_pcie_write_reg(dev->dm_core,
		PCIE_GEN3_L0_DEBUG_BUS_STATUS4, 1);

	/* request Gen3 as the target link speed in Link Control 2 */
	msm_pcie_write_reg_field(dev->dm_core,
		PCIE20_CAP + PCI_EXP_LNKCTL2,
		PCI_EXP_LNKCAP_SLS, GEN3_SPEED);
}
3721
/*
 * msm_pcie_enable() - power up a root complex and train the PCIe link.
 * @dev: root-complex device structure
 * @options: bitmask of PM_VREG / PM_CLK / PM_PIPE_CLK selecting which
 *           resource classes to initialize on this call
 *
 * Sequence: assert PERST, enable regulators and clocks, configure the
 * controller (RC mode, interrupts, MSI write-halt), initialize the PHY,
 * de-assert PERST, start link training, and finally verify that the
 * endpoint's configuration space is readable.
 *
 * Serialized against msm_pcie_disable() by dev->setup_lock.
 *
 * Return: 0 on success; -ENODEV if the PHY or EP config space never
 * becomes ready; -1 if link training fails; other negative errno from
 * regulator/clock init.  On failure, resources acquired so far are
 * released via the goto ladder (unless msm_pcie_keep_resources_on
 * requests they be kept for debug).
 */
static int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
{
	int ret = 0;
	uint32_t val;
	long int retries = 0;
	int link_check_count = 0;
	unsigned long ep_up_timeout = 0;
	u32 link_check_max_count;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	/* nothing to do if the link is already up */
	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
		PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
			dev->rc_idx);
		goto out;
	}

	/* assert PCIe reset link to keep EP in reset */

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
		dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
		PERST_PROPAGATION_DELAY_US_MAX);

	/* enable power */

	if (options & PM_VREG) {
		ret = msm_pcie_vreg_init(dev);
		if (ret)
			goto out;
	}

	/* enable clocks */
	if (options & PM_CLK) {
		ret = msm_pcie_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto clk_fail;
	}

	/* re-apply secure configuration after power collapse, if present */
	if (dev->scm_dev_id) {
		PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
		msm_pcie_restore_sec_config(dev);
	}

	/* configure PCIe to RC mode */
	msm_pcie_write_reg(dev->parf, PCIE20_PARF_DEVICE_TYPE, 0x4);

	/* enable l1 mode, clear bit 5 (REQ_NOT_ENTR_L1) */
	if (dev->l1_supported)
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);

	/* enable PCIe clocks and resets */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);

	/* change DBI base address */
	writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* NOTE(review): 0x365E is an opaque SYS_CTRL setting — confirm
	 * field meanings against the controller HPG.
	 */
	writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
				0, BIT(4));

	/* enable selected IRQ */
	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
		/* clear the mask, then unmask linkdown/AER/MSI events */
		msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);

		msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
					BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
					BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
					BIT(MSM_PCIE_INT_EVT_AER_ERR) |
					BIT(MSM_PCIE_INT_EVT_MSI_0) |
					BIT(MSM_PCIE_INT_EVT_MSI_1) |
					BIT(MSM_PCIE_INT_EVT_MSI_2) |
					BIT(MSM_PCIE_INT_EVT_MSI_3) |
					BIT(MSM_PCIE_INT_EVT_MSI_4) |
					BIT(MSM_PCIE_INT_EVT_MSI_5) |
					BIT(MSM_PCIE_INT_EVT_MSI_6) |
					BIT(MSM_PCIE_INT_EVT_MSI_7));

		PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
	}

	writel_relaxed(dev->slv_addr_space_size, dev->parf +
		PCIE20_PARF_SLV_ADDR_SPACE_SIZE);

	if (dev->use_msi) {
		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
		/* keep current halt size unless a DT override was given */
		val = dev->wr_halt_size ? dev->wr_halt_size :
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		/* BIT(31) enables the write-address halt feature */
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
			BIT(31) | val);

		PCIE_DBG(dev,
			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
			dev->rc_idx,
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
	}

	/* init PCIe PHY */
	pcie_phy_init(dev);

	if (options & PM_PIPE_CLK) {
		usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
			PHY_STABILIZATION_DELAY_US_MAX);
		/* Enable the pipe clock */
		ret = msm_pcie_pipe_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto link_fail;
	}

	/* check capability for max link speed */
	if (!dev->max_link_speed) {
		val = readl_relaxed(dev->dm_core + PCIE20_CAP + PCI_EXP_LNKCAP);
		dev->max_link_speed = val & PCI_EXP_LNKCAP_SLS;
	}

	PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);

	/* poll the PHY ready status with bounded retries */
	do {
		if (pcie_phy_is_ready(dev))
			break;
		retries++;
		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
			REFCLK_STABILIZATION_DELAY_US_MAX);
	} while (retries < PHY_READY_TIMEOUT_COUNT);

	PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
		dev->rc_idx, retries);

	if (pcie_phy_is_ready(dev))
		PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
	else {
		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
			dev->rc_idx);
		ret = -ENODEV;
		pcie_phy_dump(dev);
		goto link_fail;
	}

	/* optional settle time for slow endpoints (milliseconds) */
	if (dev->ep_latency)
		usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				dev->gpio[MSM_PCIE_GPIO_EP].on);

	/* de-assert PCIe reset link to bring EP out of reset */

	PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);

	/* deadline for the EP config-space accessibility check below */
	ep_up_timeout = jiffies + usecs_to_jiffies(EP_UP_TIMEOUT_US);

	/* setup Gen3 specific configurations */
	if (dev->max_link_speed == GEN3_SPEED)
		msm_pcie_setup_gen3(dev);

	/* debug override: cap the target link speed at Gen1 */
	if (msm_pcie_force_gen1 & BIT(dev->rc_idx))
		msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_CAP + PCI_EXP_LNKCTL2,
			PCI_EXP_LNKCAP_SLS, GEN1_SPEED);

	/* set max tlp read size */
	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
				0x7000, dev->tlp_rd_size);

	/* enable link training */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));

	PCIE_DBG(dev, "%s", "check if link is up\n");

	/* per-RC override of the link-up poll count (upper bits hold it) */
	if (msm_pcie_link_check_max_count & BIT(dev->rc_idx))
		link_check_max_count = msm_pcie_link_check_max_count >> 4;
	else
		link_check_max_count = LINK_UP_CHECK_MAX_COUNT;

	/* Wait for up to 100ms for the link to come up */
	do {
		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
		val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
		PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
			dev->rc_idx, (val >> 12) & 0x3f);
	} while ((!(val & XMLH_LINK_UP) ||
		!msm_pcie_confirm_linkup(dev, false, false, NULL))
		&& (link_check_count++ < link_check_max_count));

	if ((val & XMLH_LINK_UP) &&
		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
		PCIE_DBG(dev, "Link is up after %d checkings\n",
			link_check_count);
		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
	} else {
		/* training failed: put the EP back in reset and bail out */
		PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
			dev->rc_idx);
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
			dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
			dev->rc_idx);
		ret = -1;
		goto link_fail;
	}

	dev->link_status = MSM_PCIE_LINK_ENABLED;
	dev->power_on = true;
	dev->suspending = false;
	dev->link_turned_on_counter++;

	/* optional settle time for a switch downstream of the RC */
	if (dev->switch_latency) {
		PCIE_DBG(dev, "switch_latency: %dms\n",
			dev->switch_latency);
		if (dev->switch_latency <= SWITCH_DELAY_MAX)
			usleep_range(dev->switch_latency * 1000,
				dev->switch_latency * 1000);
		else
			msleep(dev->switch_latency);
	}

	msm_pcie_config_controller(dev);

	/* check endpoint configuration space is accessible */
	while (time_before(jiffies, ep_up_timeout)) {
		if (readl_relaxed(dev->conf) != PCIE_LINK_DOWN)
			break;
		usleep_range(EP_UP_TIMEOUT_US_MIN, EP_UP_TIMEOUT_US_MAX);
	}

	if (readl_relaxed(dev->conf) != PCIE_LINK_DOWN) {
		PCIE_DBG(dev,
			"PCIe: RC%d: endpoint config space is accessible\n",
			dev->rc_idx);
	} else {
		PCIE_ERR(dev,
			"PCIe: RC%d: endpoint config space is not accessible\n",
			dev->rc_idx);
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		dev->power_on = false;
		dev->link_turned_off_counter++;
		ret = -ENODEV;
		goto link_fail;
	}

	/* program the controller's MSI block unless GIC MSI is in use */
	if (!dev->msi_gicm_addr)
		msm_pcie_config_msi_controller(dev);

	/* on resume after a previous enumeration, reconfigure devices */
	if (dev->enumerated) {
		pci_walk_bus(dev->dev->bus, &msm_pcie_config_device, dev);
		msm_pcie_config_link_pm_rc(dev, dev->dev, true);
	}

	goto out;

link_fail:
	/* debug hook: leave everything powered for post-mortem */
	if (msm_pcie_keep_resources_on & BIT(dev->rc_idx))
		goto out;

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

	/* pre-Gen3 PHYs are reset/powered down through these registers */
	if (dev->max_link_speed != GEN3_SPEED) {
		msm_pcie_write_reg(dev->phy,
			PCIE_N_SW_RESET(dev->rc_idx), 0x1);
		msm_pcie_write_reg(dev->phy,
			PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx), 0);
	}

	msm_pcie_pipe_clk_deinit(dev);
	msm_pcie_clk_deinit(dev);
clk_fail:
	msm_pcie_vreg_deinit(dev);
out:
	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
4016
/*
 * msm_pcie_disable() - power down a root complex and take the link down.
 * @dev: root-complex device structure
 * @options: bitmask of PM_VREG / PM_CLK / PM_PIPE_CLK selecting which
 *           resource classes to release (mirrors msm_pcie_enable())
 *
 * Asserts PERST to hold the endpoint in reset, resets/powers down the
 * PHY (pre-Gen3 only), then releases clocks, regulators and the pipe
 * clock per @options.  Serialized by dev->setup_lock; a no-op if the
 * link is already powered down.
 */
static void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
{
	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (!dev->power_on) {
		PCIE_DBG(dev,
			"PCIe: the link of RC%d is already power down.\n",
			dev->rc_idx);
		mutex_unlock(&dev->setup_lock);
		return;
	}

	dev->link_status = MSM_PCIE_LINK_DISABLED;
	dev->power_on = false;
	dev->link_turned_off_counter++;

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);

	/* hold the endpoint in reset for the rest of the teardown */
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
		dev->gpio[MSM_PCIE_GPIO_PERST].on);

	/* pre-Gen3 PHYs are reset/powered down through these registers */
	if (dev->max_link_speed != GEN3_SPEED) {
		msm_pcie_write_reg(dev->phy,
			PCIE_N_SW_RESET(dev->rc_idx), 0x1);
		msm_pcie_write_reg(dev->phy,
			PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx), 0);
	}

	if (options & PM_CLK) {
		/* put the PHY block back in reset before removing clocks */
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
			BIT(0));
		msm_pcie_clk_deinit(dev);
	}

	if (options & PM_VREG)
		msm_pcie_vreg_deinit(dev);

	if (options & PM_PIPE_CLK)
		msm_pcie_pipe_clk_deinit(dev);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
			1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
4068
4069static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
4070 struct msm_pcie_device_info *ep_dev_info)
4071{
4072 u32 val;
4073 void __iomem *ep_base = ep_dev_info->conf_base;
4074 u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
4075 0xff;
4076
4077 while (current_offset) {
4078 if (msm_pcie_check_align(dev, current_offset))
4079 return;
4080
4081 val = readl_relaxed(ep_base + current_offset);
4082 if ((val & 0xff) == PCIE20_CAP_ID) {
4083 ep_dev_info->dev_ctrlstts_offset =
4084 current_offset + 0x8;
4085 break;
4086 }
4087 current_offset = (val >> 8) & 0xff;
4088 }
4089
4090 if (!ep_dev_info->dev_ctrlstts_offset) {
4091 PCIE_DBG(dev,
4092 "RC%d endpoint does not support PCIe cap registers\n",
4093 dev->rc_idx);
4094 return;
4095 }
4096
4097 PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
4098 dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
4099
4100 /* Enable AER on EP */
4101 msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
4102 BIT(3)|BIT(2)|BIT(1)|BIT(0));
4103
4104 PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
4105 readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
4106}
4107
/*
 * msm_pcie_config_device_table() - record a discovered PCI device in the
 * global and per-RC device tables and program an iATU window for it.
 * @dev: the struct device being visited (bus_for_each_dev callback)
 * @pdev: opaque pointer to the owning struct msm_pcie_dev_t
 *
 * For each device behind the root port (bus number != 0), finds a free
 * slot keyed by BDF in both msm_pcie_dev_tbl (global) and the RC's
 * pcidev_table, fills in the per-device config-space mapping, and sets
 * up the iATU translation window (one SZ_4K window per table index).
 * Bridges get bus-master/memory-space enabled immediately; endpoints
 * are counted in num_ep and left for later registration.
 *
 * Return: 0 on success, -ENODEV if no pci_dev, or the (positive) table
 * size when a table is full — callers treat any non-zero value as
 * failure.
 */
static int msm_pcie_config_device_table(struct device *dev, void *pdev)
{
	struct pci_dev *pcidev = to_pci_dev(dev);
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
	int ret = 0;
	u32 rc_idx = pcie_dev->rc_idx;
	u32 i, index;
	u32 bdf = 0;
	u8 type;
	u32 h_type;
	u32 bme;

	if (!pcidev) {
		PCIE_ERR(pcie_dev,
			"PCIe: Did not find PCI device in list for RC%d.\n",
			pcie_dev->rc_idx);
		return -ENODEV;
	}

	PCIE_DBG(pcie_dev,
		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
		pcidev->vendor, pcidev->device);

	/* skip the root bus itself (bus 0) */
	if (!pcidev->bus->number)
		return ret;

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
	/* bus 1 is directly below the root port: use CFG0 (type 0) access */
	type = pcidev->bus->number == 1 ?
		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
		/* unclaimed global slot pre-keyed with this BDF */
		if (msm_pcie_dev_tbl[i].bdf == bdf &&
			!msm_pcie_dev_tbl[i].dev) {
			for (index = 0; index < MAX_DEVICE_NUM; index++) {
				if (dev_table_t[index].bdf == bdf) {
					/* fill both tables with the same
					 * mapping for this device
					 */
					msm_pcie_dev_tbl[i].dev = pcidev;
					msm_pcie_dev_tbl[i].domain = rc_idx;
					msm_pcie_dev_tbl[i].conf_base =
						pcie_dev->conf + index * SZ_4K;
					msm_pcie_dev_tbl[i].phy_address =
						axi_conf->start + index * SZ_4K;

					dev_table_t[index].dev = pcidev;
					dev_table_t[index].domain = rc_idx;
					dev_table_t[index].conf_base =
						pcie_dev->conf + index * SZ_4K;
					dev_table_t[index].phy_address =
						axi_conf->start + index * SZ_4K;

					/* one 4K iATU window per device */
					msm_pcie_iatu_config(pcie_dev, index,
						type,
						dev_table_t[index].phy_address,
						dev_table_t[index].phy_address
						+ SZ_4K - 1,
						bdf);

					h_type = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_HEADER_TYPE);

					bme = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_COMMAND_STATUS);

					/* header-type bit set => bridge:
					 * enable memory space + bus master
					 * (0x06); otherwise count as an EP
					 */
					if (h_type & (1 << 16)) {
						pci_write_config_dword(pcidev,
							PCIE20_COMMAND_STATUS,
							bme | 0x06);
					} else {
						pcie_dev->num_ep++;
						dev_table_t[index].registered =
							false;
					}

					if (pcie_dev->num_ep > 1)
						pcie_dev->pending_ep_reg = true;

					if (pcie_dev->aer_enable)
						msm_pcie_config_ep_aer(pcie_dev,
							&dev_table_t[index]);

					break;
				}
			}
			if (index == MAX_DEVICE_NUM) {
				PCIE_ERR(pcie_dev,
					"RC%d PCI device table is full.\n",
					rc_idx);
				ret = index;
			} else {
				break;
			}
		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
			pcidev == msm_pcie_dev_tbl[i].dev) {
			/* already recorded on a previous walk */
			break;
		}
	}
	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"Global PCI device table is full: %d elements.\n",
			i);
		PCIE_ERR(pcie_dev,
			"Bus number is 0x%x\nDevice number is 0x%x\n",
			pcidev->bus->number, pcidev->devfn);
		ret = i;
	}
	return ret;
}
4218
Tony Truong2a022a02017-04-13 14:04:30 -07004219static void msm_pcie_configure_sid(struct msm_pcie_dev_t *pcie_dev,
4220 struct pci_dev *dev)
Tony Truong349ee492014-10-01 17:35:56 -07004221{
Tony Truong2a022a02017-04-13 14:04:30 -07004222 u32 offset;
4223 u32 sid;
Tony Truong349ee492014-10-01 17:35:56 -07004224 u32 bdf;
Tony Truong2a022a02017-04-13 14:04:30 -07004225 int ret;
Tony Truong349ee492014-10-01 17:35:56 -07004226
Tony Truong2a022a02017-04-13 14:04:30 -07004227 ret = iommu_fwspec_get_id(&dev->dev, &sid);
4228 if (ret) {
Tony Truong349ee492014-10-01 17:35:56 -07004229 PCIE_DBG(pcie_dev,
Tony Truong2a022a02017-04-13 14:04:30 -07004230 "PCIe: RC%d: Device does not have a SID\n",
Tony Truong349ee492014-10-01 17:35:56 -07004231 pcie_dev->rc_idx);
Tony Truong2a022a02017-04-13 14:04:30 -07004232 return;
Tony Truong349ee492014-10-01 17:35:56 -07004233 }
4234
4235 PCIE_DBG(pcie_dev,
Tony Truong2a022a02017-04-13 14:04:30 -07004236 "PCIe: RC%d: Device SID: 0x%x\n",
4237 pcie_dev->rc_idx, sid);
Tony Truong349ee492014-10-01 17:35:56 -07004238
Tony Truong2a022a02017-04-13 14:04:30 -07004239 bdf = BDF_OFFSET(dev->bus->number, dev->devfn);
4240 offset = (sid - pcie_dev->smmu_sid_base) * 4;
4241
4242 if (offset >= MAX_SHORT_BDF_NUM * 4) {
4243 PCIE_ERR(pcie_dev,
4244 "PCIe: RC%d: Invalid SID offset: 0x%x. Should be less than 0x%x\n",
4245 pcie_dev->rc_idx, offset, MAX_SHORT_BDF_NUM * 4);
4246 return;
4247 }
4248
4249 msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
4250 msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_SID_OFFSET, 0);
4251 msm_pcie_write_reg(pcie_dev->parf,
4252 PCIE20_PARF_BDF_TRANSLATE_N + offset, bdf >> 16);
Tony Truong349ee492014-10-01 17:35:56 -07004253}
Tony Truong349ee492014-10-01 17:35:56 -07004254
/*
 * msm_pcie_enumerate() - enable the link and enumerate the PCI bus for
 * one root complex.
 * @rc_idx: index into msm_pcie_dev[] selecting the root complex
 *
 * On first call: powers up the link via msm_pcie_enable(PM_ALL), builds
 * the root bus from DT host-bridge resources, scans and adds child
 * devices, locates the root-port pci_dev whose IDs were read from the
 * DBI space, and populates the driver's device tables.  Subsequent
 * calls (or calls before probe completes) log and return.
 *
 * Serialized by dev->enumerate_lock.
 *
 * Return: 0 on success, -EPROBE_DEFER before probe, -ENOMEM/-ENODEV or
 * a propagated error on failure.
 */
int msm_pcie_enumerate(u32 rc_idx)
{
	int ret = 0, bus_ret = 0, scan_ret = 0;
	struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];

	mutex_lock(&dev->enumerate_lock);

	PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);

	if (!dev->drv_ready) {
		PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
			rc_idx);
		ret = -EPROBE_DEFER;
		goto out;
	}

	if (!dev->enumerated) {
		ret = msm_pcie_enable(dev, PM_ALL);

		/* kick start ARM PCI configuration framework */
		if (!ret) {
			struct pci_dev *pcidev = NULL;
			bool found = false;
			struct pci_bus *bus;
			resource_size_t iobase = 0;
			/* vendor/device ID of the root port, read from
			 * the start of the DBI (config header) space
			 */
			u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
			u32 vendor_id = ids & 0xffff;
			u32 device_id = (ids & 0xffff0000) >> 16;
			LIST_HEAD(res);

			PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
				vendor_id, device_id);

			ret = of_pci_get_host_bridge_resources(
				dev->pdev->dev.of_node,
				0, 0xff, &res, &iobase);
			if (ret) {
				PCIE_ERR(dev,
					"PCIe: failed to get host bridge resources for RC%d: %d\n",
					dev->rc_idx, ret);
				goto out;
			}

			bus = pci_create_root_bus(&dev->pdev->dev, 0,
						&msm_pcie_ops, dev, &res);
			if (!bus) {
				PCIE_ERR(dev,
					"PCIe: failed to create root bus for RC%d\n",
					dev->rc_idx);
				ret = -ENOMEM;
				goto out;
			}

			scan_ret = pci_scan_child_bus(bus);
			PCIE_DBG(dev,
				"PCIe: RC%d: The max subordinate bus number discovered is %d\n",
				dev->rc_idx, scan_ret);

			msm_pcie_fixup_irqs(dev);
			pci_assign_unassigned_bus_resources(bus);
			pci_bus_add_devices(bus);

			dev->enumerated = true;

			/* enable memory space + bus master on the RC */
			msm_pcie_write_mask(dev->dm_core +
				PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));

			if (dev->cpl_timeout && dev->bridge_found)
				msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0xf, dev->cpl_timeout);

			/* snapshot command/status for restore-on-resume */
			if (dev->shadow_en) {
				u32 val = readl_relaxed(dev->dm_core +
					PCIE20_COMMAND_STATUS);
				PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
					val);
				dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
			}

			/* find the root-port pci_dev that belongs to this
			 * RC (matching by IDs and bus private data)
			 */
			do {
				pcidev = pci_get_device(vendor_id,
					device_id, pcidev);
				if (pcidev && (&msm_pcie_dev[rc_idx] ==
					(struct msm_pcie_dev_t *)
					PCIE_BUS_PRIV_DATA(pcidev->bus))) {
					msm_pcie_dev[rc_idx].dev = pcidev;
					found = true;
					PCIE_DBG(&msm_pcie_dev[rc_idx],
						"PCI device is found for RC%d\n",
						rc_idx);
				}
			} while (!found && pcidev);

			if (!pcidev) {
				PCIE_ERR(dev,
					"PCIe: Did not find PCI device for RC%d.\n",
					dev->rc_idx);
				ret = -ENODEV;
				goto out;
			}

			bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
					&msm_pcie_config_device_table);

			if (bus_ret) {
				PCIE_ERR(dev,
					"PCIe: Failed to set up device table for RC%d\n",
					dev->rc_idx);
				ret = -ENODEV;
				goto out;
			}

			msm_pcie_config_link_pm_rc(dev, dev->dev, true);
		} else {
			PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
				dev->rc_idx);
		}
	} else {
		PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
			dev->rc_idx);
	}

out:
	mutex_unlock(&dev->enumerate_lock);

	return ret;
}
EXPORT_SYMBOL(msm_pcie_enumerate);
4384
4385static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
4386 enum msm_pcie_event event)
4387{
4388 if (dev->event_reg && dev->event_reg->callback &&
4389 (dev->event_reg->events & event)) {
4390 struct msm_pcie_notify *notify = &dev->event_reg->notify;
4391
4392 notify->event = event;
4393 notify->user = dev->event_reg->user;
4394 PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
4395 dev->rc_idx, event);
4396 dev->event_reg->callback(notify);
4397
4398 if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
4399 (event == MSM_PCIE_EVENT_LINKDOWN)) {
4400 dev->user_suspend = true;
4401 PCIE_DBG(dev,
4402 "PCIe: Client of RC%d will recover the link later.\n",
4403 dev->rc_idx);
4404 return;
4405 }
4406 } else {
4407 PCIE_DBG2(dev,
4408 "PCIe: Client of RC%d does not have registration for event %d\n",
4409 dev->rc_idx, event);
4410 }
4411}
4412
4413static void handle_wake_func(struct work_struct *work)
4414{
4415 int i, ret;
4416 struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
4417 handle_wake_work);
4418
4419 PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);
4420
4421 mutex_lock(&dev->recovery_lock);
4422
4423 if (!dev->enumerated) {
4424 PCIE_DBG(dev,
4425 "PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
4426 dev->rc_idx);
4427
4428 ret = msm_pcie_enumerate(dev->rc_idx);
4429 if (ret) {
4430 PCIE_ERR(dev,
4431 "PCIe: failed to enable RC%d upon wake request from the device.\n",
4432 dev->rc_idx);
4433 goto out;
4434 }
4435
4436 if (dev->num_ep > 1) {
4437 for (i = 0; i < MAX_DEVICE_NUM; i++) {
4438 dev->event_reg = dev->pcidev_table[i].event_reg;
4439
4440 if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
4441 && dev->event_reg &&
4442 dev->event_reg->callback &&
4443 (dev->event_reg->events &
4444 MSM_PCIE_EVENT_LINKUP)) {
4445 struct msm_pcie_notify *notify =
4446 &dev->event_reg->notify;
4447 notify->event = MSM_PCIE_EVENT_LINKUP;
4448 notify->user = dev->event_reg->user;
4449 PCIE_DBG(dev,
4450 "PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
4451 dev->rc_idx);
4452 dev->event_reg->callback(notify);
4453 }
4454 }
4455 } else {
4456 if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
4457 dev->event_reg && dev->event_reg->callback &&
4458 (dev->event_reg->events &
4459 MSM_PCIE_EVENT_LINKUP)) {
4460 struct msm_pcie_notify *notify =
4461 &dev->event_reg->notify;
4462 notify->event = MSM_PCIE_EVENT_LINKUP;
4463 notify->user = dev->event_reg->user;
4464 PCIE_DBG(dev,
4465 "PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
4466 dev->rc_idx);
4467 dev->event_reg->callback(notify);
4468 } else {
4469 PCIE_DBG(dev,
4470 "PCIe: Client of RC%d does not have registration for linkup event.\n",
4471 dev->rc_idx);
4472 }
4473 }
4474 goto out;
4475 } else {
4476 PCIE_ERR(dev,
4477 "PCIe: The enumeration for RC%d has already been done.\n",
4478 dev->rc_idx);
4479 goto out;
4480 }
4481
4482out:
4483 mutex_unlock(&dev->recovery_lock);
4484}
4485
/*
 * handle_aer_irq() - AER interrupt handler for a root complex.
 * @irq: interrupt number
 * @data: the struct msm_pcie_dev_t for this RC
 *
 * Harvests the RC's AER correctable/uncorrectable/root error status
 * and Device Status error bits, updates the per-RC error counters,
 * then (if the link is up) decodes the AER error source ID to find the
 * reporting endpoint in pcidev_table and harvests/clears the same set
 * of registers on the endpoint.  Status registers are cleared by
 * writing the set bits back (write-1-to-clear semantics per the AER
 * register definitions).
 *
 * Correctable-error logging is throttled by corr_counter_limit.
 *
 * Return: IRQ_HANDLED always.
 */
static irqreturn_t handle_aer_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
	int ep_corr_val = 0, ep_uncorr_val = 0;
	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
	u32 ep_dev_ctrlstts_offset = 0;
	int i, j, ep_src_bdf = 0;
	void __iomem *ep_base = NULL;

	PCIE_DBG2(dev,
		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
		dev->rc_idx, irq, dev->rc_corr_counter,
		dev->rc_non_fatal_counter, dev->rc_fatal_counter,
		dev->ep_corr_counter, dev->ep_non_fatal_counter,
		dev->ep_fatal_counter);

	/* snapshot the RC's AER and device status registers */
	uncorr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_UNCORR_ERR_STATUS_REG);
	corr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_CORR_ERR_STATUS_REG);
	rc_err_status = readl_relaxed(dev->dm_core +
				PCIE20_AER_ROOT_ERR_STATUS_REG);
	rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
				PCIE20_CAP_DEVCTRLSTATUS);

	if (uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				uncorr_val);
	/* throttle correctable-error logging */
	if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
		PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				corr_val);

	/* Device Status bits 18/17/16: fatal / non-fatal / correctable */
	if ((rc_dev_ctrlstts >> 18) & 0x1)
		dev->rc_fatal_counter++;
	if ((rc_dev_ctrlstts >> 17) & 0x1)
		dev->rc_non_fatal_counter++;
	if ((rc_dev_ctrlstts >> 16) & 0x1)
		dev->rc_corr_counter++;

	/* clear the RC's device-status error bits (write-1-to-clear) */
	msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(18)|BIT(17)|BIT(16));

	/* EP registers are unreachable with the link down */
	if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
		PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
		goto out;
	}

	/* pass 0: correctable source (low 16 bits of the source ID reg);
	 * pass 1: uncorrectable source (high 16 bits)
	 */
	for (i = 0; i < 2; i++) {
		if (i)
			ep_src_bdf = readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
		else
			ep_src_bdf = (readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;

		if (!ep_src_bdf)
			continue;

		/* match the source BDF against the per-RC device table */
		for (j = 0; j < MAX_DEVICE_NUM; j++) {
			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
				PCIE_DBG2(dev,
					"PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
					i ? "Uncorrectable" : "Correctable",
					dev->pcidev_table[j].bdf >> 24,
					dev->pcidev_table[j].bdf >> 19 & 0x1f,
					dev->pcidev_table[j].bdf >> 16 & 0x07);
				ep_base = dev->pcidev_table[j].conf_base;
				ep_dev_ctrlstts_offset = dev->
					pcidev_table[j].dev_ctrlstts_offset;
				break;
			}
		}

		if (!ep_base) {
			PCIE_ERR(dev,
				"PCIe: RC%d no endpoint found for reported error\n",
				dev->rc_idx);
			goto out;
		}

		/* harvest the endpoint's AER and device status */
		ep_uncorr_val = readl_relaxed(ep_base +
					PCIE20_AER_UNCORR_ERR_STATUS_REG);
		ep_corr_val = readl_relaxed(ep_base +
					PCIE20_AER_CORR_ERR_STATUS_REG);
		ep_dev_ctrlstts = readl_relaxed(ep_base +
					ep_dev_ctrlstts_offset);

		if (ep_uncorr_val)
			PCIE_DBG(dev,
				"EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				ep_uncorr_val);
		if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
			PCIE_DBG(dev,
				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				ep_corr_val);

		if ((ep_dev_ctrlstts >> 18) & 0x1)
			dev->ep_fatal_counter++;
		if ((ep_dev_ctrlstts >> 17) & 0x1)
			dev->ep_non_fatal_counter++;
		if ((ep_dev_ctrlstts >> 16) & 0x1)
			dev->ep_corr_counter++;

		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
					BIT(18)|BIT(17)|BIT(16));

		/* clear the endpoint's AER status bits */
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_UNCORR_ERR_STATUS_REG,
				0x3fff031, 0x3fff031);
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_CORR_ERR_STATUS_REG,
				0xf1c1, 0xf1c1);
	}
out:
	if (((dev->rc_corr_counter < corr_counter_limit) &&
		(dev->ep_corr_counter < corr_counter_limit)) ||
		uncorr_val || ep_uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
				rc_err_status);
	/* clear the RC's AER status bits */
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_UNCORR_ERR_STATUS_REG,
			0x3fff031, 0x3fff031);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_CORR_ERR_STATUS_REG,
			0xf1c1, 0xf1c1);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_ROOT_ERR_STATUS_REG,
			0x7f, 0x7f);

	return IRQ_HANDLED;
}
4619
/*
 * handle_wake_irq - ISR for the PCIE_WAKE_N sideband GPIO interrupt.
 *
 * If the RC has not been enumerated yet (and wake-triggered enumeration
 * is not disabled via boot_option), enumeration is kicked off from a
 * workqueue.  Otherwise a wakeup source is pulsed and every registered
 * client is notified with MSM_PCIE_EVENT_WAKEUP.
 */
static irqreturn_t handle_wake_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);

	dev->wake_counter++;
	PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
			dev->wake_counter, dev->rc_idx);

	PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
		dev->rc_idx);

	if (!dev->enumerated && !(dev->boot_option &
		MSM_PCIE_NO_WAKE_ENUMERATION)) {
		/* enumeration cannot run in IRQ context; defer to a work item */
		PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
		schedule_work(&dev->handle_wake_work);
	} else {
		PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
		/* briefly hold a wakeup source so the PM core registers it */
		__pm_stay_awake(&dev->ws);
		__pm_relax(&dev->ws);

		if (dev->num_ep > 1) {
			/* notify each registered endpoint individually */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_WAKEUP);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
		}
	}

	spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);

	return IRQ_HANDLED;
}
4660
/*
 * handle_linkdown_irq - ISR for the link-down interrupt.
 *
 * Spurious events (link not enabled, or RC suspending) are only logged.
 * On a genuine linkdown the link is marked disabled, config-space
 * shadowing is stopped, PERST is asserted (unless the keep-resources-on
 * debug option is set) and clients are notified with
 * MSM_PCIE_EVENT_LINKDOWN.
 */
static irqreturn_t handle_linkdown_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	int i;

	dev->linkdown_counter++;

	PCIE_DBG(dev,
		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
		dev->linkdown_counter, dev->rc_idx);

	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG(dev,
			"PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
			dev->rc_idx);
	} else if (dev->suspending) {
		PCIE_DBG(dev,
			"PCIe:the link of RC%d is suspending.\n",
			dev->rc_idx);
	} else {
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		/* the register shadow is stale once the link has dropped */
		dev->shadow_en = false;

		/* optional debug behavior selected through sysfs/debugfs */
		if (dev->linkdown_panic)
			panic("User has chosen to panic on linkdown\n");

		/* assert PERST */
		if (!(msm_pcie_keep_resources_on & BIT(dev->rc_idx)))
			gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
					dev->gpio[MSM_PCIE_GPIO_PERST].on);

		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

		if (dev->num_ep > 1) {
			/* notify each registered endpoint individually */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_LINKDOWN);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
		}
	}

	return IRQ_HANDLED;
}
4708
/*
 * handle_msi_irq - ISR for the controller's built-in MSI target.
 *
 * Walks every MSI controller status register, acknowledges each pending
 * bit by writing it back, then dispatches the matching virtual IRQ from
 * the MSI irq domain.  The status register is re-read after every ack to
 * catch vectors that fired while the previous one was being handled.
 */
static irqreturn_t handle_msi_irq(int irq, void *data)
{
	int i, j;
	unsigned long val;
	struct msm_pcie_dev_t *dev = data;
	void __iomem *ctrl_status;

	PCIE_DUMP(dev, "irq: %d\n", irq);

	/*
	 * check for set bits, clear it by setting that bit
	 * and trigger corresponding irq
	 */
	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
		/* status registers are laid out at a 12-byte stride */
		ctrl_status = dev->dm_core +
				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);

		val = readl_relaxed(ctrl_status);
		while (val) {
			j = find_first_bit(&val, 32);
			writel_relaxed(BIT(j), ctrl_status);
			/* ensure that interrupt is cleared (acked) */
			wmb();
			/* each status register covers 32 hwirqs */
			generic_handle_irq(
			   irq_find_mapping(dev->irq_domain, (j + (32*i)))
			   );
			val = readl_relaxed(ctrl_status);
		}
	}

	return IRQ_HANDLED;
}
4741
/*
 * handle_global_irq - ISR for the aggregated PARF "global" interrupt.
 *
 * Reads the masked status word, clears the latched events in hardware,
 * then demultiplexes each set bit to the linkdown or AER sub-handler.
 * Events arriving while the RC is suspending are ignored (the hardware
 * status is left for the resume path).
 */
static irqreturn_t handle_global_irq(int irq, void *data)
{
	int i;
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	u32 status = 0;

	spin_lock_irqsave(&dev->irq_lock, irqsave_flags);

	if (dev->suspending) {
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently suspending.\n",
			dev->rc_idx);
		spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);
		return IRQ_HANDLED;
	}

	/* only consider events that are not masked off */
	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);

	/* write-1-to-clear the latched events */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);

	PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
		dev->rc_idx, irq, status);

	for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
		if (status & BIT(i)) {
			switch (i) {
			case MSM_PCIE_INT_EVT_LINK_DOWN:
				PCIE_DBG(dev,
					"PCIe: RC%d: handle linkdown event.\n",
					dev->rc_idx);
				handle_linkdown_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_LEGACY:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER legacy event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_ERR:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			default:
				PCIE_DUMP(dev,
					"PCIe: RC%d: Unexpected event %d is caught!\n",
					dev->rc_idx, i);
			}
		}
	}

	spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);

	return IRQ_HANDLED;
}
4800
Tony Truong52122a62017-03-23 18:00:34 -07004801static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
Tony Truong9e240272017-11-14 15:59:11 -08004802 struct pci_dev *pdev,
4803 struct msi_desc *entry)
Tony Truong52122a62017-03-23 18:00:34 -07004804{
4805 struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
4806 int bypass_en = 0;
4807
4808 if (!domain) {
4809 PCIE_DBG(dev,
4810 "PCIe: RC%d: client does not have an iommu domain\n",
4811 dev->rc_idx);
4812 return;
4813 }
4814
4815 iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
4816 if (!bypass_en) {
Tony Truong9e240272017-11-14 15:59:11 -08004817 dma_addr_t iova = entry->msg.address_lo;
Tony Truong52122a62017-03-23 18:00:34 -07004818
Tony Truong9e240272017-11-14 15:59:11 -08004819 PCIE_DBG(dev, "PCIe: RC%d: unmap QGIC MSI IOVA\n", dev->rc_idx);
4820
4821 dma_unmap_resource(&pdev->dev, iova, PAGE_SIZE,
4822 DMA_BIDIRECTIONAL, 0);
Tony Truong52122a62017-03-23 18:00:34 -07004823 }
4824}
4825
Tony Truong92940fe2017-12-21 11:01:25 -08004826static void msm_pcie_destroy_irq(struct msi_desc *entry, unsigned int irq)
Tony Truong349ee492014-10-01 17:35:56 -07004827{
Tony Truongc3c52ae2017-03-29 12:16:51 -07004828 int pos;
Tony Truong349ee492014-10-01 17:35:56 -07004829 struct msm_pcie_dev_t *dev;
Tony Truong92940fe2017-12-21 11:01:25 -08004830 struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
Tony Truongb09d0e82017-06-02 13:37:36 -07004831
Tony Truongc3c52ae2017-03-29 12:16:51 -07004832 if (!pdev) {
4833 pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
Tony Truong349ee492014-10-01 17:35:56 -07004834 return;
4835 }
4836
Tony Truongc3c52ae2017-03-29 12:16:51 -07004837 dev = PCIE_BUS_PRIV_DATA(pdev->bus);
4838 if (!dev) {
4839 pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
4840 return;
4841 }
4842
Tony Truong349ee492014-10-01 17:35:56 -07004843 if (dev->msi_gicm_addr) {
Tony Truong92940fe2017-12-21 11:01:25 -08004844 int firstirq = entry->irq;
4845 u32 nvec = (1 << entry->msi_attrib.multiple);
4846
Tony Truong349ee492014-10-01 17:35:56 -07004847 PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
4848
Tony Truongc3c52ae2017-03-29 12:16:51 -07004849 if (irq < firstirq || irq > firstirq + nvec - 1) {
Tony Truong349ee492014-10-01 17:35:56 -07004850 PCIE_ERR(dev,
4851 "Could not find irq: %d in RC%d MSI table\n",
4852 irq, dev->rc_idx);
4853 return;
4854 }
Tony Truong52122a62017-03-23 18:00:34 -07004855 if (irq == firstirq + nvec - 1)
Tony Truong9e240272017-11-14 15:59:11 -08004856 msm_pcie_unmap_qgic_addr(dev, pdev, entry);
Tony Truongc3c52ae2017-03-29 12:16:51 -07004857 pos = irq - firstirq;
Tony Truong349ee492014-10-01 17:35:56 -07004858 } else {
4859 PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
4860 pos = irq - irq_find_mapping(dev->irq_domain, 0);
4861 }
4862
4863 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4864
4865 PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
4866 pos, *dev->msi_irq_in_use);
4867 clear_bit(pos, dev->msi_irq_in_use);
4868 PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
4869 pos, *dev->msi_irq_in_use);
4870}
4871
/* hookup to linux pci msi framework */
void arch_teardown_msi_irq(unsigned int irq)
{
	struct msi_desc *entry;

	PCIE_GEN_DBG("irq %d deallocated\n", irq);

	/* nothing to tear down if no descriptor is attached to this irq */
	entry = irq_get_msi_desc(irq);
	if (!entry)
		return;

	msm_pcie_destroy_irq(entry, irq);
}
4882
4883void arch_teardown_msi_irqs(struct pci_dev *dev)
4884{
4885 struct msi_desc *entry;
4886 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
4887
4888 PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
4889 pcie_dev->rc_idx, dev->vendor, dev->device);
4890
4891 pcie_dev->use_msi = false;
4892
4893 list_for_each_entry(entry, &dev->dev.msi_list, list) {
4894 int i, nvec;
4895
4896 if (entry->irq == 0)
4897 continue;
4898 nvec = 1 << entry->msi_attrib.multiple;
4899 for (i = 0; i < nvec; i++)
Tony Truong92940fe2017-12-21 11:01:25 -08004900 msm_pcie_destroy_irq(entry, entry->irq + i);
Tony Truong349ee492014-10-01 17:35:56 -07004901 }
4902}
4903
/*
 * No-op irq_ack callback: MSI status bits are cleared directly in
 * handle_msi_irq(), so there is nothing left to acknowledge here.
 */
static void msm_pcie_msi_nop(struct irq_data *d)
{
}
4907
/* irq_chip backing the virtual MSI irq domain of this controller */
static struct irq_chip pcie_msi_chip = {
	.name = "msm-pcie-msi",
	.irq_ack = msm_pcie_msi_nop,
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
4916
4917static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
4918{
4919 int irq, pos;
4920
4921 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4922
4923again:
4924 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
4925
4926 if (pos >= PCIE_MSI_NR_IRQS)
4927 return -ENOSPC;
4928
4929 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
4930
4931 if (test_and_set_bit(pos, dev->msi_irq_in_use))
4932 goto again;
4933 else
4934 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
4935
4936 irq = irq_create_mapping(dev->irq_domain, pos);
4937 if (!irq)
4938 return -EINVAL;
4939
4940 return irq;
4941}
4942
4943static int arch_setup_msi_irq_default(struct pci_dev *pdev,
4944 struct msi_desc *desc, int nvec)
4945{
4946 int irq;
4947 struct msi_msg msg;
4948 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
4949
4950 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4951
4952 irq = msm_pcie_create_irq(dev);
4953
4954 PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);
4955
4956 if (irq < 0)
4957 return irq;
4958
4959 PCIE_DBG(dev, "irq %d allocated\n", irq);
4960
Tony Truongc3c52ae2017-03-29 12:16:51 -07004961 irq_set_chip_data(irq, pdev);
Tony Truong349ee492014-10-01 17:35:56 -07004962 irq_set_msi_desc(irq, desc);
4963
4964 /* write msi vector and data */
4965 msg.address_hi = 0;
4966 msg.address_lo = MSM_PCIE_MSI_PHY;
4967 msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
4968 write_msi_msg(irq, &msg);
4969
4970 return 0;
4971}
4972
4973static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
4974{
4975 int irq, pos;
4976
4977 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4978
4979again:
4980 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
4981
4982 if (pos >= PCIE_MSI_NR_IRQS)
4983 return -ENOSPC;
4984
4985 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
4986
4987 if (test_and_set_bit(pos, dev->msi_irq_in_use))
4988 goto again;
4989 else
4990 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
4991
4992 if (pos >= MSM_PCIE_MAX_MSI) {
4993 PCIE_ERR(dev,
4994 "PCIe: RC%d: pos %d is not less than %d\n",
4995 dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
4996 return MSM_PCIE_ERROR;
4997 }
4998
4999 irq = dev->msi[pos].num;
5000 if (!irq) {
5001 PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
5002 dev->rc_idx);
5003 return -EINVAL;
5004 }
5005
5006 return irq;
5007}
5008
Tony Truong52122a62017-03-23 18:00:34 -07005009static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
5010 struct pci_dev *pdev,
5011 struct msi_msg *msg)
5012{
5013 struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
Tony Truong9e240272017-11-14 15:59:11 -08005014 int bypass_en = 0;
5015 dma_addr_t iova;
Tony Truong52122a62017-03-23 18:00:34 -07005016
5017 msg->address_hi = 0;
5018 msg->address_lo = dev->msi_gicm_addr;
5019
5020 if (!domain) {
5021 PCIE_DBG(dev,
5022 "PCIe: RC%d: client does not have an iommu domain\n",
5023 dev->rc_idx);
5024 return 0;
5025 }
5026
5027 iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
5028
5029 PCIE_DBG(dev,
5030 "PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n",
5031 dev->rc_idx, bypass_en ? "bypass" : "enabled",
5032 pdev->bus->number, pdev->devfn);
5033
5034 if (bypass_en)
5035 return 0;
5036
Tony Truong9e240272017-11-14 15:59:11 -08005037 iova = dma_map_resource(&pdev->dev, dev->msi_gicm_addr, PAGE_SIZE,
Tony Truong4a70c6b2017-06-05 18:27:32 -07005038 DMA_BIDIRECTIONAL, 0);
Tony Truong9e240272017-11-14 15:59:11 -08005039 if (dma_mapping_error(&pdev->dev, iova)) {
Tony Truong4a70c6b2017-06-05 18:27:32 -07005040 PCIE_ERR(dev, "PCIe: RC%d: failed to map QGIC address",
5041 dev->rc_idx);
5042 return -EIO;
Tony Truong52122a62017-03-23 18:00:34 -07005043 }
5044
Tony Truong9e240272017-11-14 15:59:11 -08005045 msg->address_lo = iova;
Tony Truong52122a62017-03-23 18:00:34 -07005046
5047 return 0;
5048}
5049
Tony Truong349ee492014-10-01 17:35:56 -07005050static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
5051 struct msi_desc *desc, int nvec)
5052{
Tony Truong52122a62017-03-23 18:00:34 -07005053 int irq, index, ret, firstirq = 0;
Tony Truong349ee492014-10-01 17:35:56 -07005054 struct msi_msg msg;
5055 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5056
5057 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5058
5059 for (index = 0; index < nvec; index++) {
5060 irq = msm_pcie_create_irq_qgic(dev);
5061 PCIE_DBG(dev, "irq %d is allocated\n", irq);
5062
5063 if (irq < 0)
5064 return irq;
5065
5066 if (index == 0)
5067 firstirq = irq;
5068
5069 irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
5070 }
5071
5072 /* write msi vector and data */
5073 irq_set_msi_desc(firstirq, desc);
Tony Truong52122a62017-03-23 18:00:34 -07005074
5075 ret = msm_pcie_map_qgic_addr(dev, pdev, &msg);
5076 if (ret)
5077 return ret;
5078
Tony Truong349ee492014-10-01 17:35:56 -07005079 msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
5080 write_msi_msg(firstirq, &msg);
5081
5082 return 0;
5083}
5084
5085int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
5086{
5087 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5088
5089 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5090
5091 if (dev->msi_gicm_addr)
5092 return arch_setup_msi_irq_qgic(pdev, desc, 1);
5093 else
5094 return arch_setup_msi_irq_default(pdev, desc, 1);
5095}
5096
Tony Truong349ee492014-10-01 17:35:56 -07005097int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
5098{
5099 struct msi_desc *entry;
5100 int ret;
5101 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5102
5103 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
5104
5105 if (type != PCI_CAP_ID_MSI || nvec > 32)
5106 return -ENOSPC;
5107
5108 PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);
5109
5110 list_for_each_entry(entry, &dev->dev.msi_list, list) {
5111 entry->msi_attrib.multiple =
Tony Truong8e8b96b2018-02-27 13:36:28 -08005112 __ilog2_u32(__roundup_pow_of_two(nvec));
Tony Truong349ee492014-10-01 17:35:56 -07005113
5114 if (pcie_dev->msi_gicm_addr)
5115 ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
5116 else
5117 ret = arch_setup_msi_irq_default(dev, entry, nvec);
5118
5119 PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);
5120
5121 if (ret < 0)
5122 return ret;
5123 if (ret > 0)
5124 return -ENOSPC;
5125 }
5126
5127 pcie_dev->use_msi = true;
5128
5129 return 0;
5130}
5131
5132static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
5133 irq_hw_number_t hwirq)
5134{
5135 irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
Tony Truong349ee492014-10-01 17:35:56 -07005136 return 0;
5137}
5138
/* linear irq-domain ops for the controller's built-in MSI vectors */
static const struct irq_domain_ops msm_pcie_msi_ops = {
	.map = msm_pcie_msi_map,
};
5142
Stephen Boydb5b8fc32017-06-21 08:59:11 -07005143static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
Tony Truong349ee492014-10-01 17:35:56 -07005144{
5145 int rc;
5146 int msi_start = 0;
5147 struct device *pdev = &dev->pdev->dev;
5148
5149 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5150
5151 if (dev->rc_idx)
5152 wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
5153 else
5154 wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
5155
5156 /* register handler for linkdown interrupt */
5157 if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
5158 rc = devm_request_irq(pdev,
5159 dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
5160 handle_linkdown_irq,
5161 IRQF_TRIGGER_RISING,
5162 dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
5163 dev);
5164 if (rc) {
5165 PCIE_ERR(dev,
5166 "PCIe: Unable to request linkdown interrupt:%d\n",
5167 dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
5168 return rc;
5169 }
5170 }
5171
5172 /* register handler for physical MSI interrupt line */
5173 if (dev->irq[MSM_PCIE_INT_MSI].num) {
5174 rc = devm_request_irq(pdev,
5175 dev->irq[MSM_PCIE_INT_MSI].num,
5176 handle_msi_irq,
5177 IRQF_TRIGGER_RISING,
5178 dev->irq[MSM_PCIE_INT_MSI].name,
5179 dev);
5180 if (rc) {
5181 PCIE_ERR(dev,
5182 "PCIe: RC%d: Unable to request MSI interrupt\n",
5183 dev->rc_idx);
5184 return rc;
5185 }
5186 }
5187
5188 /* register handler for AER interrupt */
5189 if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
5190 rc = devm_request_irq(pdev,
5191 dev->irq[MSM_PCIE_INT_PLS_ERR].num,
5192 handle_aer_irq,
5193 IRQF_TRIGGER_RISING,
5194 dev->irq[MSM_PCIE_INT_PLS_ERR].name,
5195 dev);
5196 if (rc) {
5197 PCIE_ERR(dev,
5198 "PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
5199 dev->rc_idx,
5200 dev->irq[MSM_PCIE_INT_PLS_ERR].num);
5201 return rc;
5202 }
5203 }
5204
5205 /* register handler for AER legacy interrupt */
5206 if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
5207 rc = devm_request_irq(pdev,
5208 dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
5209 handle_aer_irq,
5210 IRQF_TRIGGER_RISING,
5211 dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
5212 dev);
5213 if (rc) {
5214 PCIE_ERR(dev,
5215 "PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
5216 dev->rc_idx,
5217 dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
5218 return rc;
5219 }
5220 }
5221
5222 if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
5223 rc = devm_request_irq(pdev,
5224 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
5225 handle_global_irq,
5226 IRQF_TRIGGER_RISING,
5227 dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
5228 dev);
5229 if (rc) {
5230 PCIE_ERR(dev,
5231 "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
5232 dev->rc_idx,
5233 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
5234 return rc;
5235 }
5236 }
5237
5238 /* register handler for PCIE_WAKE_N interrupt line */
5239 if (dev->wake_n) {
5240 rc = devm_request_irq(pdev,
5241 dev->wake_n, handle_wake_irq,
5242 IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
5243 if (rc) {
5244 PCIE_ERR(dev,
5245 "PCIe: RC%d: Unable to request wake interrupt\n",
5246 dev->rc_idx);
5247 return rc;
5248 }
5249
5250 INIT_WORK(&dev->handle_wake_work, handle_wake_func);
5251
5252 rc = enable_irq_wake(dev->wake_n);
5253 if (rc) {
5254 PCIE_ERR(dev,
5255 "PCIe: RC%d: Unable to enable wake interrupt\n",
5256 dev->rc_idx);
5257 return rc;
5258 }
5259 }
5260
5261 /* Create a virtual domain of interrupts */
5262 if (!dev->msi_gicm_addr) {
5263 dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
5264 PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
5265
5266 if (!dev->irq_domain) {
5267 PCIE_ERR(dev,
5268 "PCIe: RC%d: Unable to initialize irq domain\n",
5269 dev->rc_idx);
5270
5271 if (dev->wake_n)
5272 disable_irq(dev->wake_n);
5273
5274 return PTR_ERR(dev->irq_domain);
5275 }
5276
5277 msi_start = irq_create_mapping(dev->irq_domain, 0);
5278 }
5279
5280 return 0;
5281}
5282
Stephen Boydb5b8fc32017-06-21 08:59:11 -07005283static void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
Tony Truong349ee492014-10-01 17:35:56 -07005284{
5285 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5286
5287 wakeup_source_trash(&dev->ws);
5288
5289 if (dev->wake_n)
5290 disable_irq(dev->wake_n);
5291}
5292
Tony Truongb1af8b62017-05-31 15:40:38 -07005293static void msm_pcie_config_l0s(struct msm_pcie_dev_t *dev,
5294 struct pci_dev *pdev, bool enable)
5295{
5296 u32 val;
5297 u32 lnkcap_offset = pdev->pcie_cap + PCI_EXP_LNKCAP;
5298 u32 lnkctl_offset = pdev->pcie_cap + PCI_EXP_LNKCTL;
5299
5300 pci_read_config_dword(pdev, lnkcap_offset, &val);
5301 if (!(val & BIT(10))) {
5302 PCIE_DBG(dev,
5303 "PCIe: RC%d: PCI device does not support L0s\n",
5304 dev->rc_idx);
5305 return;
5306 }
5307
5308 if (enable)
5309 msm_pcie_config_clear_set_dword(pdev, lnkctl_offset, 0,
5310 PCI_EXP_LNKCTL_ASPM_L0S);
5311 else
5312 msm_pcie_config_clear_set_dword(pdev, lnkctl_offset,
5313 PCI_EXP_LNKCTL_ASPM_L0S, 0);
5314
5315 pci_read_config_dword(pdev, lnkctl_offset, &val);
5316 PCIE_DBG2(dev, "PCIe: RC%d: LINKCTRLSTATUS:0x%x\n", dev->rc_idx, val);
5317}
5318
Tony Truongbad3b742017-11-22 14:40:19 -08005319static int msm_pcie_config_l0s_disable(struct pci_dev *pdev, void *dev)
5320{
5321 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
5322
5323 msm_pcie_config_l0s(pcie_dev, pdev, false);
5324 return 0;
5325}
5326
5327static int msm_pcie_config_l0s_enable(struct pci_dev *pdev, void *dev)
5328{
5329 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
5330
5331 msm_pcie_config_l0s(pcie_dev, pdev, true);
5332 return 0;
5333}
5334
Tony Truongb1af8b62017-05-31 15:40:38 -07005335static void msm_pcie_config_l1(struct msm_pcie_dev_t *dev,
5336 struct pci_dev *pdev, bool enable)
5337{
5338 u32 val;
5339 u32 lnkcap_offset = pdev->pcie_cap + PCI_EXP_LNKCAP;
5340 u32 lnkctl_offset = pdev->pcie_cap + PCI_EXP_LNKCTL;
5341
5342 pci_read_config_dword(pdev, lnkcap_offset, &val);
5343 if (!(val & BIT(11))) {
5344 PCIE_DBG(dev,
5345 "PCIe: RC%d: PCI device does not support L1\n",
5346 dev->rc_idx);
5347 return;
5348 }
5349
5350 if (enable)
5351 msm_pcie_config_clear_set_dword(pdev, lnkctl_offset, 0,
5352 PCI_EXP_LNKCTL_ASPM_L1);
5353 else
5354 msm_pcie_config_clear_set_dword(pdev, lnkctl_offset,
5355 PCI_EXP_LNKCTL_ASPM_L1, 0);
5356
5357 pci_read_config_dword(pdev, lnkctl_offset, &val);
5358 PCIE_DBG2(dev, "PCIe: RC%d: LINKCTRLSTATUS:0x%x\n", dev->rc_idx, val);
5359}
5360
Tony Truongbad3b742017-11-22 14:40:19 -08005361static int msm_pcie_config_l1_disable(struct pci_dev *pdev, void *dev)
5362{
5363 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
5364
5365 msm_pcie_config_l1(pcie_dev, pdev, false);
5366 return 0;
5367}
5368
5369static int msm_pcie_config_l1_enable(struct pci_dev *pdev, void *dev)
5370{
5371 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
5372
5373 msm_pcie_config_l1(pcie_dev, pdev, true);
5374 return 0;
5375}
5376
Tony Truongb1af8b62017-05-31 15:40:38 -07005377static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev,
5378 struct pci_dev *pdev, bool enable)
5379{
Tony Truonge26150f2017-12-18 15:38:51 -08005380 bool l1_1_pcipm_support, l1_2_pcipm_support;
5381 bool l1_1_aspm_support, l1_2_aspm_support;
Tony Truongb1af8b62017-05-31 15:40:38 -07005382 u32 val, val2;
5383 u32 l1ss_cap_id_offset, l1ss_cap_offset, l1ss_ctl1_offset;
5384 u32 devctl2_offset = pdev->pcie_cap + PCI_EXP_DEVCTL2;
5385
5386 l1ss_cap_id_offset = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
5387 if (!l1ss_cap_id_offset) {
5388 PCIE_DBG(dev,
5389 "PCIe: RC%d could not find L1ss capability register for device\n",
5390 dev->rc_idx);
5391 return;
5392 }
5393
5394 l1ss_cap_offset = l1ss_cap_id_offset + PCI_L1SS_CAP;
5395 l1ss_ctl1_offset = l1ss_cap_id_offset + PCI_L1SS_CTL1;
5396
5397 pci_read_config_dword(pdev, l1ss_cap_offset, &val);
Tony Truonge26150f2017-12-18 15:38:51 -08005398 l1_1_pcipm_support = !!(val & (PCI_L1SS_CAP_PCIPM_L1_1));
5399 l1_2_pcipm_support = !!(val & (PCI_L1SS_CAP_PCIPM_L1_2));
5400 l1_1_aspm_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_1));
5401 l1_2_aspm_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_2));
5402 if (!l1_1_pcipm_support && !l1_2_pcipm_support &&
5403 !l1_1_aspm_support && !l1_2_aspm_support) {
Tony Truongb1af8b62017-05-31 15:40:38 -07005404 PCIE_DBG(dev,
Tony Truonge26150f2017-12-18 15:38:51 -08005405 "PCIe: RC%d: PCI device does not support any L1ss\n",
Tony Truongb1af8b62017-05-31 15:40:38 -07005406 dev->rc_idx);
5407 return;
5408 }
5409
5410 /* Enable the AUX Clock and the Core Clk to be synchronous for L1ss */
5411 if (pci_is_root_bus(pdev->bus) && !dev->aux_clk_sync) {
5412 if (enable)
5413 msm_pcie_write_mask(dev->parf +
5414 PCIE20_PARF_SYS_CTRL, BIT(3), 0);
5415 else
5416 msm_pcie_write_mask(dev->parf +
5417 PCIE20_PARF_SYS_CTRL, 0, BIT(3));
5418 }
5419
5420 if (enable) {
5421 msm_pcie_config_clear_set_dword(pdev, devctl2_offset, 0,
5422 PCI_EXP_DEVCTL2_LTR_EN);
5423 msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset, 0,
Tony Truonge26150f2017-12-18 15:38:51 -08005424 (l1_1_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_1 : 0) |
5425 (l1_2_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_2 : 0) |
5426 (l1_1_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
5427 (l1_2_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0));
Tony Truongb1af8b62017-05-31 15:40:38 -07005428 } else {
5429 msm_pcie_config_clear_set_dword(pdev, devctl2_offset,
5430 PCI_EXP_DEVCTL2_LTR_EN, 0);
5431 msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset,
Tony Truonge26150f2017-12-18 15:38:51 -08005432 (l1_1_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_1 : 0) |
5433 (l1_2_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_2 : 0) |
5434 (l1_1_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
5435 (l1_2_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0), 0);
Tony Truongb1af8b62017-05-31 15:40:38 -07005436 }
5437
5438 pci_read_config_dword(pdev, l1ss_ctl1_offset, &val);
5439 PCIE_DBG2(dev, "PCIe: RC%d: L1SUB_CONTROL1:0x%x\n", dev->rc_idx, val);
5440
5441 pci_read_config_dword(pdev, devctl2_offset, &val2);
5442 PCIE_DBG2(dev, "PCIe: RC%d: DEVICE_CONTROL2_STATUS2::0x%x\n",
5443 dev->rc_idx, val2);
5444}
5445
Tony Truongbad3b742017-11-22 14:40:19 -08005446static int msm_pcie_config_l1ss_disable(struct pci_dev *pdev, void *dev)
5447{
5448 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
5449
5450 msm_pcie_config_l1ss(pcie_dev, pdev, false);
5451 return 0;
5452}
5453
5454static int msm_pcie_config_l1ss_enable(struct pci_dev *pdev, void *dev)
5455{
5456 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
5457
5458 msm_pcie_config_l1ss(pcie_dev, pdev, true);
5459 return 0;
5460}
5461
Tony Truongb1af8b62017-05-31 15:40:38 -07005462static void msm_pcie_config_clock_power_management(struct msm_pcie_dev_t *dev,
5463 struct pci_dev *pdev)
5464{
5465 u32 val;
5466 u32 lnkcap_offset = pdev->pcie_cap + PCI_EXP_LNKCAP;
5467 u32 lnkctl_offset = pdev->pcie_cap + PCI_EXP_LNKCTL;
5468
5469 if (pci_is_root_bus(pdev->bus))
5470 return;
5471
5472 pci_read_config_dword(pdev, lnkcap_offset, &val);
5473 if (val & PCI_EXP_LNKCAP_CLKPM)
5474 msm_pcie_config_clear_set_dword(pdev, lnkctl_offset, 0,
5475 PCI_EXP_LNKCTL_CLKREQ_EN);
5476 else
5477 PCIE_DBG(dev,
5478 "PCIe: RC%d: PCI device does not support clock power management\n",
5479 dev->rc_idx);
5480}
5481
/*
 * msm_pcie_config_link_pm - apply the RC's link power-management policy
 * to one device: common clock configuration, clock power management,
 * then the supported ASPM states in the order L0s, L1ss, L1.
 */
static void msm_pcie_config_link_pm(struct msm_pcie_dev_t *dev,
				struct pci_dev *pdev, bool enable)
{
	if (dev->common_clk_en)
		msm_pcie_config_clear_set_dword(pdev,
			pdev->pcie_cap + PCI_EXP_LNKCTL, 0,
			PCI_EXP_LNKCTL_CCC);

	if (dev->clk_power_manage_en)
		msm_pcie_config_clock_power_management(dev, pdev);
	if (dev->l0s_supported)
		msm_pcie_config_l0s(dev, pdev, enable);
	/* L1 substates are configured before L1 itself is toggled */
	if (dev->l1ss_supported)
		msm_pcie_config_l1ss(dev, pdev, enable);
	if (dev->l1_supported)
		msm_pcie_config_l1(dev, pdev, enable);
}
5499
5500static void msm_pcie_config_link_pm_rc(struct msm_pcie_dev_t *dev,
5501 struct pci_dev *pdev, bool enable)
5502{
5503 bool child_l0s_enable = 0, child_l1_enable = 0, child_l1ss_enable = 0;
5504
5505 if (!pdev->subordinate || !(&pdev->subordinate->devices)) {
5506 PCIE_DBG(dev,
5507 "PCIe: RC%d: no device connected to root complex\n",
5508 dev->rc_idx);
5509 return;
5510 }
5511
5512 if (dev->l0s_supported) {
5513 struct pci_dev *child_pdev, *c_pdev;
5514
5515 list_for_each_entry_safe(child_pdev, c_pdev,
5516 &pdev->subordinate->devices, bus_list) {
5517 u32 val;
5518
5519 pci_read_config_dword(child_pdev,
Tony Truonge023d012017-11-10 13:36:26 -08005520 child_pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
Tony Truongb1af8b62017-05-31 15:40:38 -07005521 child_l0s_enable = !!(val & PCI_EXP_LNKCTL_ASPM_L0S);
5522 if (child_l0s_enable)
5523 break;
5524 }
5525
5526 if (child_l0s_enable)
5527 msm_pcie_config_l0s(dev, pdev, enable);
5528 else
5529 dev->l0s_supported = false;
5530 }
5531
5532 if (dev->l1ss_supported) {
5533 struct pci_dev *child_pdev, *c_pdev;
5534
5535 list_for_each_entry_safe(child_pdev, c_pdev,
5536 &pdev->subordinate->devices, bus_list) {
5537 u32 val;
5538 u32 l1ss_cap_id_offset =
5539 pci_find_ext_capability(child_pdev,
5540 PCI_EXT_CAP_ID_L1SS);
5541
5542 if (!l1ss_cap_id_offset)
5543 continue;
5544
5545 pci_read_config_dword(child_pdev,
5546 l1ss_cap_id_offset + PCI_L1SS_CTL1, &val);
5547 child_l1ss_enable = !!(val &
Tony Truonge26150f2017-12-18 15:38:51 -08005548 (PCI_L1SS_CTL1_PCIPM_L1_1 |
5549 PCI_L1SS_CTL1_PCIPM_L1_2 |
5550 PCI_L1SS_CTL1_ASPM_L1_1 |
Tony Truongb1af8b62017-05-31 15:40:38 -07005551 PCI_L1SS_CTL1_ASPM_L1_2));
5552 if (child_l1ss_enable)
5553 break;
5554 }
5555
5556 if (child_l1ss_enable)
5557 msm_pcie_config_l1ss(dev, pdev, enable);
5558 else
5559 dev->l1ss_supported = false;
5560 }
5561
5562 if (dev->l1_supported) {
5563 struct pci_dev *child_pdev, *c_pdev;
5564
5565 list_for_each_entry_safe(child_pdev, c_pdev,
5566 &pdev->subordinate->devices, bus_list) {
5567 u32 val;
5568
5569 pci_read_config_dword(child_pdev,
Tony Truonge023d012017-11-10 13:36:26 -08005570 child_pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
Tony Truongb1af8b62017-05-31 15:40:38 -07005571 child_l1_enable = !!(val & PCI_EXP_LNKCTL_ASPM_L1);
5572 if (child_l1_enable)
5573 break;
5574 }
5575
5576 if (child_l1_enable)
5577 msm_pcie_config_l1(dev, pdev, enable);
5578 else
5579 dev->l1_supported = false;
5580 }
5581}
5582
Tony Truong7772e692017-04-13 17:03:34 -07005583static int msm_pcie_config_device(struct pci_dev *dev, void *pdev)
5584{
5585 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)pdev;
5586 u8 busnr = dev->bus->number;
5587 u8 slot = PCI_SLOT(dev->devfn);
5588 u8 func = PCI_FUNC(dev->devfn);
5589
5590 PCIE_DBG(pcie_dev, "PCIe: RC%d: configure PCI device %02x:%02x.%01x\n",
5591 pcie_dev->rc_idx, busnr, slot, func);
5592
Tony Truong2a022a02017-04-13 14:04:30 -07005593 msm_pcie_configure_sid(pcie_dev, dev);
5594
Tony Truongb1af8b62017-05-31 15:40:38 -07005595 if (!pci_is_root_bus(dev->bus))
5596 msm_pcie_config_link_pm(pcie_dev, dev, true);
5597
Tony Truong7772e692017-04-13 17:03:34 -07005598 return 0;
5599}
5600
5601/* Hook to setup PCI device during PCI framework scan */
5602int pcibios_add_device(struct pci_dev *dev)
5603{
5604 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5605
5606 return msm_pcie_config_device(dev, pcie_dev);
5607}
Tony Truong349ee492014-10-01 17:35:56 -07005608
5609static int msm_pcie_probe(struct platform_device *pdev)
5610{
5611 int ret = 0;
5612 int rc_idx = -1;
5613 int i, j;
5614
5615 PCIE_GEN_DBG("%s\n", __func__);
5616
5617 mutex_lock(&pcie_drv.drv_lock);
5618
5619 ret = of_property_read_u32((&pdev->dev)->of_node,
5620 "cell-index", &rc_idx);
5621 if (ret) {
5622 PCIE_GEN_DBG("Did not find RC index.\n");
5623 goto out;
5624 } else {
5625 if (rc_idx >= MAX_RC_NUM) {
5626 pr_err(
5627 "PCIe: Invalid RC Index %d (max supported = %d)\n",
5628 rc_idx, MAX_RC_NUM);
5629 goto out;
5630 }
5631 pcie_drv.rc_num++;
5632 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
5633 rc_idx);
5634 }
5635
5636 msm_pcie_dev[rc_idx].l0s_supported =
5637 of_property_read_bool((&pdev->dev)->of_node,
5638 "qcom,l0s-supported");
Tony Truong7416d722017-09-12 16:45:18 -07005639 if (msm_pcie_invert_l0s_support & BIT(rc_idx))
5640 msm_pcie_dev[rc_idx].l0s_supported =
5641 !msm_pcie_dev[rc_idx].l0s_supported;
Tony Truong349ee492014-10-01 17:35:56 -07005642 PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
5643 msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
5644 msm_pcie_dev[rc_idx].l1_supported =
5645 of_property_read_bool((&pdev->dev)->of_node,
5646 "qcom,l1-supported");
Tony Truong7416d722017-09-12 16:45:18 -07005647 if (msm_pcie_invert_l1_support & BIT(rc_idx))
5648 msm_pcie_dev[rc_idx].l1_supported =
5649 !msm_pcie_dev[rc_idx].l1_supported;
Tony Truong349ee492014-10-01 17:35:56 -07005650 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
5651 msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
5652 msm_pcie_dev[rc_idx].l1ss_supported =
5653 of_property_read_bool((&pdev->dev)->of_node,
5654 "qcom,l1ss-supported");
Tony Truong7416d722017-09-12 16:45:18 -07005655 if (msm_pcie_invert_l1ss_support & BIT(rc_idx))
5656 msm_pcie_dev[rc_idx].l1ss_supported =
5657 !msm_pcie_dev[rc_idx].l1ss_supported;
Tony Truong349ee492014-10-01 17:35:56 -07005658 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
5659 msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
5660 msm_pcie_dev[rc_idx].common_clk_en =
5661 of_property_read_bool((&pdev->dev)->of_node,
5662 "qcom,common-clk-en");
5663 PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
5664 msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
5665 msm_pcie_dev[rc_idx].clk_power_manage_en =
5666 of_property_read_bool((&pdev->dev)->of_node,
5667 "qcom,clk-power-manage-en");
5668 PCIE_DBG(&msm_pcie_dev[rc_idx],
5669 "Clock power management is %s enabled.\n",
5670 msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
5671 msm_pcie_dev[rc_idx].aux_clk_sync =
5672 of_property_read_bool((&pdev->dev)->of_node,
5673 "qcom,aux-clk-sync");
5674 PCIE_DBG(&msm_pcie_dev[rc_idx],
5675 "AUX clock is %s synchronous to Core clock.\n",
5676 msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
5677
5678 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
5679 of_property_read_bool((&pdev->dev)->of_node,
5680 "qcom,use-19p2mhz-aux-clk");
5681 PCIE_DBG(&msm_pcie_dev[rc_idx],
5682 "AUX clock frequency is %s 19.2MHz.\n",
5683 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
5684
5685 msm_pcie_dev[rc_idx].smmu_exist =
5686 of_property_read_bool((&pdev->dev)->of_node,
5687 "qcom,smmu-exist");
5688 PCIE_DBG(&msm_pcie_dev[rc_idx],
5689 "SMMU does %s exist.\n",
5690 msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
5691
5692 msm_pcie_dev[rc_idx].smmu_sid_base = 0;
5693 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
5694 &msm_pcie_dev[rc_idx].smmu_sid_base);
5695 if (ret)
5696 PCIE_DBG(&msm_pcie_dev[rc_idx],
5697 "RC%d SMMU sid base not found\n",
5698 msm_pcie_dev[rc_idx].rc_idx);
5699 else
5700 PCIE_DBG(&msm_pcie_dev[rc_idx],
5701 "RC%d: qcom,smmu-sid-base: 0x%x.\n",
5702 msm_pcie_dev[rc_idx].rc_idx,
5703 msm_pcie_dev[rc_idx].smmu_sid_base);
5704
Tony Truong9f2c7722017-02-28 15:02:27 -08005705 msm_pcie_dev[rc_idx].boot_option = 0;
5706 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,boot-option",
5707 &msm_pcie_dev[rc_idx].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07005708 PCIE_DBG(&msm_pcie_dev[rc_idx],
Tony Truong9f2c7722017-02-28 15:02:27 -08005709 "PCIe: RC%d boot option is 0x%x.\n",
5710 rc_idx, msm_pcie_dev[rc_idx].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07005711
5712 msm_pcie_dev[rc_idx].phy_ver = 1;
5713 ret = of_property_read_u32((&pdev->dev)->of_node,
5714 "qcom,pcie-phy-ver",
5715 &msm_pcie_dev[rc_idx].phy_ver);
5716 if (ret)
5717 PCIE_DBG(&msm_pcie_dev[rc_idx],
5718 "RC%d: pcie-phy-ver does not exist.\n",
5719 msm_pcie_dev[rc_idx].rc_idx);
5720 else
5721 PCIE_DBG(&msm_pcie_dev[rc_idx],
5722 "RC%d: pcie-phy-ver: %d.\n",
5723 msm_pcie_dev[rc_idx].rc_idx,
5724 msm_pcie_dev[rc_idx].phy_ver);
5725
5726 msm_pcie_dev[rc_idx].n_fts = 0;
5727 ret = of_property_read_u32((&pdev->dev)->of_node,
5728 "qcom,n-fts",
5729 &msm_pcie_dev[rc_idx].n_fts);
5730
5731 if (ret)
5732 PCIE_DBG(&msm_pcie_dev[rc_idx],
5733 "n-fts does not exist. ret=%d\n", ret);
5734 else
5735 PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
5736 msm_pcie_dev[rc_idx].n_fts);
5737
Tony Truong24e02ba2017-08-30 14:53:14 -07005738 msm_pcie_dev[rc_idx].max_link_speed = GEN2_SPEED;
5739 ret = of_property_read_u32(pdev->dev.of_node,
5740 "qcom,max-link-speed",
5741 &msm_pcie_dev[rc_idx].max_link_speed);
5742 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC%d: max-link-speed: 0x%x.\n",
5743 rc_idx, msm_pcie_dev[rc_idx].max_link_speed);
5744
Tony Truong349ee492014-10-01 17:35:56 -07005745 msm_pcie_dev[rc_idx].ext_ref_clk =
5746 of_property_read_bool((&pdev->dev)->of_node,
5747 "qcom,ext-ref-clk");
5748 PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
5749 msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
5750
5751 msm_pcie_dev[rc_idx].ep_latency = 0;
5752 ret = of_property_read_u32((&pdev->dev)->of_node,
5753 "qcom,ep-latency",
5754 &msm_pcie_dev[rc_idx].ep_latency);
5755 if (ret)
5756 PCIE_DBG(&msm_pcie_dev[rc_idx],
5757 "RC%d: ep-latency does not exist.\n",
5758 rc_idx);
5759 else
5760 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
5761 rc_idx, msm_pcie_dev[rc_idx].ep_latency);
5762
Rama Krishna Phani A72fcfb22017-06-30 15:45:06 +05305763 msm_pcie_dev[rc_idx].switch_latency = 0;
5764 ret = of_property_read_u32((&pdev->dev)->of_node,
5765 "qcom,switch-latency",
5766 &msm_pcie_dev[rc_idx].switch_latency);
5767
5768 if (ret)
5769 PCIE_DBG(&msm_pcie_dev[rc_idx],
5770 "RC%d: switch-latency does not exist.\n",
5771 rc_idx);
5772 else
5773 PCIE_DBG(&msm_pcie_dev[rc_idx],
5774 "RC%d: switch-latency: 0x%x.\n",
5775 rc_idx, msm_pcie_dev[rc_idx].switch_latency);
5776
Tony Truong349ee492014-10-01 17:35:56 -07005777 msm_pcie_dev[rc_idx].wr_halt_size = 0;
5778 ret = of_property_read_u32(pdev->dev.of_node,
5779 "qcom,wr-halt-size",
5780 &msm_pcie_dev[rc_idx].wr_halt_size);
5781 if (ret)
5782 PCIE_DBG(&msm_pcie_dev[rc_idx],
5783 "RC%d: wr-halt-size not specified in dt. Use default value.\n",
5784 rc_idx);
5785 else
5786 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
5787 rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
5788
Tony Truong41e63ec2017-08-30 12:08:12 -07005789 msm_pcie_dev[rc_idx].slv_addr_space_size = SZ_16M;
5790 ret = of_property_read_u32(pdev->dev.of_node,
5791 "qcom,slv-addr-space-size",
5792 &msm_pcie_dev[rc_idx].slv_addr_space_size);
5793 PCIE_DBG(&msm_pcie_dev[rc_idx],
5794 "RC%d: slv-addr-space-size: 0x%x.\n",
5795 rc_idx, msm_pcie_dev[rc_idx].slv_addr_space_size);
5796
Tony Truong2b675ba2017-12-12 14:52:00 -08005797 msm_pcie_dev[rc_idx].phy_status_offset = 0;
5798 ret = of_property_read_u32(pdev->dev.of_node,
5799 "qcom,phy-status-offset",
5800 &msm_pcie_dev[rc_idx].phy_status_offset);
5801 if (ret) {
5802 PCIE_ERR(&msm_pcie_dev[rc_idx],
5803 "RC%d: failed to get PCIe PHY status offset.\n",
5804 rc_idx);
5805 goto decrease_rc_num;
5806 } else {
5807 PCIE_DBG(&msm_pcie_dev[rc_idx],
5808 "RC%d: phy-status-offset: 0x%x.\n",
5809 rc_idx, msm_pcie_dev[rc_idx].phy_status_offset);
5810 }
5811
Tony Truong349ee492014-10-01 17:35:56 -07005812 msm_pcie_dev[rc_idx].cpl_timeout = 0;
5813 ret = of_property_read_u32((&pdev->dev)->of_node,
5814 "qcom,cpl-timeout",
5815 &msm_pcie_dev[rc_idx].cpl_timeout);
5816 if (ret)
5817 PCIE_DBG(&msm_pcie_dev[rc_idx],
5818 "RC%d: Using default cpl-timeout.\n",
5819 rc_idx);
5820 else
5821 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
5822 rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
5823
5824 msm_pcie_dev[rc_idx].perst_delay_us_min =
5825 PERST_PROPAGATION_DELAY_US_MIN;
5826 ret = of_property_read_u32(pdev->dev.of_node,
5827 "qcom,perst-delay-us-min",
5828 &msm_pcie_dev[rc_idx].perst_delay_us_min);
5829 if (ret)
5830 PCIE_DBG(&msm_pcie_dev[rc_idx],
5831 "RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
5832 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
5833 else
5834 PCIE_DBG(&msm_pcie_dev[rc_idx],
5835 "RC%d: perst-delay-us-min: %dus.\n",
5836 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
5837
5838 msm_pcie_dev[rc_idx].perst_delay_us_max =
5839 PERST_PROPAGATION_DELAY_US_MAX;
5840 ret = of_property_read_u32(pdev->dev.of_node,
5841 "qcom,perst-delay-us-max",
5842 &msm_pcie_dev[rc_idx].perst_delay_us_max);
5843 if (ret)
5844 PCIE_DBG(&msm_pcie_dev[rc_idx],
5845 "RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
5846 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
5847 else
5848 PCIE_DBG(&msm_pcie_dev[rc_idx],
5849 "RC%d: perst-delay-us-max: %dus.\n",
5850 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
5851
5852 msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
5853 ret = of_property_read_u32(pdev->dev.of_node,
5854 "qcom,tlp-rd-size",
5855 &msm_pcie_dev[rc_idx].tlp_rd_size);
5856 if (ret)
5857 PCIE_DBG(&msm_pcie_dev[rc_idx],
5858 "RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
5859 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
5860 else
5861 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
5862 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
5863
5864 msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
5865 msm_pcie_dev[rc_idx].msi_gicm_base = 0;
5866 ret = of_property_read_u32((&pdev->dev)->of_node,
5867 "qcom,msi-gicm-addr",
5868 &msm_pcie_dev[rc_idx].msi_gicm_addr);
5869
5870 if (ret) {
5871 PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
5872 "msi-gicm-addr does not exist.\n");
5873 } else {
5874 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
5875 msm_pcie_dev[rc_idx].msi_gicm_addr);
5876
5877 ret = of_property_read_u32((&pdev->dev)->of_node,
5878 "qcom,msi-gicm-base",
5879 &msm_pcie_dev[rc_idx].msi_gicm_base);
5880
5881 if (ret) {
5882 PCIE_ERR(&msm_pcie_dev[rc_idx],
5883 "PCIe: RC%d: msi-gicm-base does not exist.\n",
5884 rc_idx);
5885 goto decrease_rc_num;
5886 } else {
5887 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
5888 msm_pcie_dev[rc_idx].msi_gicm_base);
5889 }
5890 }
5891
5892 msm_pcie_dev[rc_idx].scm_dev_id = 0;
5893 ret = of_property_read_u32((&pdev->dev)->of_node,
5894 "qcom,scm-dev-id",
5895 &msm_pcie_dev[rc_idx].scm_dev_id);
5896
5897 msm_pcie_dev[rc_idx].rc_idx = rc_idx;
5898 msm_pcie_dev[rc_idx].pdev = pdev;
5899 msm_pcie_dev[rc_idx].vreg_n = 0;
5900 msm_pcie_dev[rc_idx].gpio_n = 0;
5901 msm_pcie_dev[rc_idx].parf_deemph = 0;
5902 msm_pcie_dev[rc_idx].parf_swing = 0;
5903 msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
5904 msm_pcie_dev[rc_idx].user_suspend = false;
5905 msm_pcie_dev[rc_idx].disable_pc = false;
5906 msm_pcie_dev[rc_idx].saved_state = NULL;
5907 msm_pcie_dev[rc_idx].enumerated = false;
5908 msm_pcie_dev[rc_idx].num_active_ep = 0;
5909 msm_pcie_dev[rc_idx].num_ep = 0;
5910 msm_pcie_dev[rc_idx].pending_ep_reg = false;
5911 msm_pcie_dev[rc_idx].phy_len = 0;
Tony Truong349ee492014-10-01 17:35:56 -07005912 msm_pcie_dev[rc_idx].phy_sequence = NULL;
Tony Truong349ee492014-10-01 17:35:56 -07005913 msm_pcie_dev[rc_idx].event_reg = NULL;
5914 msm_pcie_dev[rc_idx].linkdown_counter = 0;
5915 msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
5916 msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
5917 msm_pcie_dev[rc_idx].rc_corr_counter = 0;
5918 msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
5919 msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
5920 msm_pcie_dev[rc_idx].ep_corr_counter = 0;
5921 msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
5922 msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
5923 msm_pcie_dev[rc_idx].suspending = false;
5924 msm_pcie_dev[rc_idx].wake_counter = 0;
5925 msm_pcie_dev[rc_idx].aer_enable = true;
Tony Truong7416d722017-09-12 16:45:18 -07005926 if (msm_pcie_invert_aer_support)
5927 msm_pcie_dev[rc_idx].aer_enable = false;
Tony Truong349ee492014-10-01 17:35:56 -07005928 msm_pcie_dev[rc_idx].power_on = false;
Tony Truong349ee492014-10-01 17:35:56 -07005929 msm_pcie_dev[rc_idx].use_msi = false;
5930 msm_pcie_dev[rc_idx].use_pinctrl = false;
5931 msm_pcie_dev[rc_idx].linkdown_panic = false;
5932 msm_pcie_dev[rc_idx].bridge_found = false;
5933 memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
5934 sizeof(msm_pcie_vreg_info));
5935 memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
5936 sizeof(msm_pcie_gpio_info));
5937 memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
5938 sizeof(msm_pcie_clk_info[rc_idx]));
5939 memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
5940 sizeof(msm_pcie_pipe_clk_info[rc_idx]));
5941 memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
5942 sizeof(msm_pcie_res_info));
5943 memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
5944 sizeof(msm_pcie_irq_info));
5945 memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
5946 sizeof(msm_pcie_msi_info));
5947 memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
5948 sizeof(msm_pcie_reset_info[rc_idx]));
5949 memcpy(msm_pcie_dev[rc_idx].pipe_reset,
5950 msm_pcie_pipe_reset_info[rc_idx],
5951 sizeof(msm_pcie_pipe_reset_info[rc_idx]));
5952 msm_pcie_dev[rc_idx].shadow_en = true;
5953 for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
5954 msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
5955 for (i = 0; i < MAX_DEVICE_NUM; i++)
5956 for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
5957 msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
5958 for (i = 0; i < MAX_DEVICE_NUM; i++) {
5959 msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
5960 msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
5961 msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
5962 msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
5963 msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
Stephen Boydb5b8fc32017-06-21 08:59:11 -07005964 msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = NULL;
Tony Truong349ee492014-10-01 17:35:56 -07005965 msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
5966 msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
5967 msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
5968 msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
5969 }
5970
Tony Truongbd9a3412017-02-27 18:30:13 -08005971 dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
Tony Truongbd9a3412017-02-27 18:30:13 -08005972
Tony Truong349ee492014-10-01 17:35:56 -07005973 ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
5974 msm_pcie_dev[rc_idx].pdev);
5975
5976 if (ret)
5977 goto decrease_rc_num;
5978
5979 msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
5980 if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
5981 PCIE_ERR(&msm_pcie_dev[rc_idx],
5982 "PCIe: RC%d failed to get pinctrl\n",
5983 rc_idx);
5984 else
5985 msm_pcie_dev[rc_idx].use_pinctrl = true;
5986
5987 if (msm_pcie_dev[rc_idx].use_pinctrl) {
5988 msm_pcie_dev[rc_idx].pins_default =
5989 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
5990 "default");
5991 if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
5992 PCIE_ERR(&msm_pcie_dev[rc_idx],
5993 "PCIe: RC%d could not get pinctrl default state\n",
5994 rc_idx);
5995 msm_pcie_dev[rc_idx].pins_default = NULL;
5996 }
5997
5998 msm_pcie_dev[rc_idx].pins_sleep =
5999 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
6000 "sleep");
6001 if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
6002 PCIE_ERR(&msm_pcie_dev[rc_idx],
6003 "PCIe: RC%d could not get pinctrl sleep state\n",
6004 rc_idx);
6005 msm_pcie_dev[rc_idx].pins_sleep = NULL;
6006 }
6007 }
6008
6009 ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
6010 if (ret) {
6011 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6012 goto decrease_rc_num;
6013 }
6014
6015 ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
6016 if (ret) {
6017 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6018 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6019 goto decrease_rc_num;
6020 }
6021
Tony Truong14a5ddf2017-04-20 11:04:03 -07006022 msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
6023
Tony Truong349ee492014-10-01 17:35:56 -07006024 msm_pcie_dev[rc_idx].drv_ready = true;
6025
Tony Truong9f2c7722017-02-28 15:02:27 -08006026 if (msm_pcie_dev[rc_idx].boot_option &
6027 MSM_PCIE_NO_PROBE_ENUMERATION) {
Tony Truong349ee492014-10-01 17:35:56 -07006028 PCIE_DBG(&msm_pcie_dev[rc_idx],
Tony Truong9f2c7722017-02-28 15:02:27 -08006029 "PCIe: RC%d will be enumerated by client or endpoint.\n",
Tony Truong349ee492014-10-01 17:35:56 -07006030 rc_idx);
6031 mutex_unlock(&pcie_drv.drv_lock);
6032 return 0;
6033 }
6034
6035 ret = msm_pcie_enumerate(rc_idx);
6036
6037 if (ret)
6038 PCIE_ERR(&msm_pcie_dev[rc_idx],
6039 "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
6040 rc_idx);
6041 else
6042 PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n",
6043 rc_idx);
6044
6045 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n",
6046 dev_name(&(pdev->dev)));
6047
6048 mutex_unlock(&pcie_drv.drv_lock);
6049 return 0;
6050
6051decrease_rc_num:
6052 pcie_drv.rc_num--;
6053out:
6054 if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
6055 pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
6056 rc_idx);
6057 else
6058 PCIE_ERR(&msm_pcie_dev[rc_idx],
6059 "PCIe: Driver probe failed for RC%d:%d\n",
6060 rc_idx, ret);
6061
6062 mutex_unlock(&pcie_drv.drv_lock);
6063
6064 return ret;
6065}
6066
6067static int msm_pcie_remove(struct platform_device *pdev)
6068{
6069 int ret = 0;
6070 int rc_idx;
6071
6072 PCIE_GEN_DBG("PCIe:%s.\n", __func__);
6073
6074 mutex_lock(&pcie_drv.drv_lock);
6075
6076 ret = of_property_read_u32((&pdev->dev)->of_node,
6077 "cell-index", &rc_idx);
6078 if (ret) {
6079 pr_err("%s: Did not find RC index.\n", __func__);
6080 goto out;
6081 } else {
6082 pcie_drv.rc_num--;
6083 PCIE_GEN_DBG("%s: RC index is 0x%x.", __func__, rc_idx);
6084 }
6085
6086 msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
6087 msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
6088 msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
6089 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
6090 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
6091
6092out:
6093 mutex_unlock(&pcie_drv.drv_lock);
6094
6095 return ret;
6096}
6097
/* Device-tree match table: binds this driver to "qcom,pci-msm" nodes. */
static const struct of_device_id msm_pcie_match[] = {
	{	.compatible = "qcom,pci-msm",
	},
	{}
};
6103
/* Platform driver registration for the MSM PCIe root-complex driver. */
static struct platform_driver msm_pcie_driver = {
	.probe	= msm_pcie_probe,
	.remove	= msm_pcie_remove,
	.driver	= {
		/* NOTE(review): .owner is also set by platform_driver_register;
		 * keeping the explicit assignment is harmless.
		 */
		.name		= "pci-msm",
		.owner		= THIS_MODULE,
		.of_match_table	= msm_pcie_match,
	},
};
6113
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006114static int __init pcie_init(void)
Tony Truong349ee492014-10-01 17:35:56 -07006115{
6116 int ret = 0, i;
6117 char rc_name[MAX_RC_NAME_LEN];
6118
6119 pr_alert("pcie:%s.\n", __func__);
6120
6121 pcie_drv.rc_num = 0;
6122 mutex_init(&pcie_drv.drv_lock);
Tony Truong349ee492014-10-01 17:35:56 -07006123
6124 for (i = 0; i < MAX_RC_NUM; i++) {
6125 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
6126 msm_pcie_dev[i].ipc_log =
6127 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6128 if (msm_pcie_dev[i].ipc_log == NULL)
6129 pr_err("%s: unable to create IPC log context for %s\n",
6130 __func__, rc_name);
6131 else
6132 PCIE_DBG(&msm_pcie_dev[i],
6133 "PCIe IPC logging is enable for RC%d\n",
6134 i);
6135 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
6136 msm_pcie_dev[i].ipc_log_long =
6137 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6138 if (msm_pcie_dev[i].ipc_log_long == NULL)
6139 pr_err("%s: unable to create IPC log context for %s\n",
6140 __func__, rc_name);
6141 else
6142 PCIE_DBG(&msm_pcie_dev[i],
6143 "PCIe IPC logging %s is enable for RC%d\n",
6144 rc_name, i);
6145 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
6146 msm_pcie_dev[i].ipc_log_dump =
6147 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
6148 if (msm_pcie_dev[i].ipc_log_dump == NULL)
6149 pr_err("%s: unable to create IPC log context for %s\n",
6150 __func__, rc_name);
6151 else
6152 PCIE_DBG(&msm_pcie_dev[i],
6153 "PCIe IPC logging %s is enable for RC%d\n",
6154 rc_name, i);
6155 spin_lock_init(&msm_pcie_dev[i].cfg_lock);
6156 msm_pcie_dev[i].cfg_access = true;
6157 mutex_init(&msm_pcie_dev[i].enumerate_lock);
6158 mutex_init(&msm_pcie_dev[i].setup_lock);
6159 mutex_init(&msm_pcie_dev[i].recovery_lock);
Tony Truong349ee492014-10-01 17:35:56 -07006160 spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
Tony Truongbab696f2017-11-15 16:38:51 -08006161 spin_lock_init(&msm_pcie_dev[i].irq_lock);
Tony Truong349ee492014-10-01 17:35:56 -07006162 msm_pcie_dev[i].drv_ready = false;
6163 }
6164 for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
6165 msm_pcie_dev_tbl[i].bdf = 0;
6166 msm_pcie_dev_tbl[i].dev = NULL;
6167 msm_pcie_dev_tbl[i].short_bdf = 0;
6168 msm_pcie_dev_tbl[i].sid = 0;
6169 msm_pcie_dev_tbl[i].domain = -1;
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006170 msm_pcie_dev_tbl[i].conf_base = NULL;
Tony Truong349ee492014-10-01 17:35:56 -07006171 msm_pcie_dev_tbl[i].phy_address = 0;
6172 msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
6173 msm_pcie_dev_tbl[i].event_reg = NULL;
6174 msm_pcie_dev_tbl[i].registered = true;
6175 }
6176
6177 msm_pcie_debugfs_init();
6178
6179 ret = platform_driver_register(&msm_pcie_driver);
6180
6181 return ret;
6182}
6183
6184static void __exit pcie_exit(void)
6185{
Tony Truongbd9a3412017-02-27 18:30:13 -08006186 int i;
6187
Tony Truong349ee492014-10-01 17:35:56 -07006188 PCIE_GEN_DBG("pcie:%s.\n", __func__);
6189
6190 platform_driver_unregister(&msm_pcie_driver);
6191
6192 msm_pcie_debugfs_exit();
Tony Truongbd9a3412017-02-27 18:30:13 -08006193
6194 for (i = 0; i < MAX_RC_NUM; i++)
6195 msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
Tony Truong349ee492014-10-01 17:35:56 -07006196}
6197
6198subsys_initcall_sync(pcie_init);
6199module_exit(pcie_exit);
6200
6201
6202/* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
6203static void msm_pcie_fixup_early(struct pci_dev *dev)
6204{
6205 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6206
6207 PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
Tony Truong8ff900c2017-12-20 11:21:03 -08006208 if (pci_is_root_bus(dev->bus))
Tony Truong349ee492014-10-01 17:35:56 -07006209 dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
6210}
Tony Truong8ff900c2017-12-20 11:21:03 -08006211DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
Tony Truong349ee492014-10-01 17:35:56 -07006212 msm_pcie_fixup_early);
6213
/* Suspend the PCIe link */
/*
 * Sequence: mark the RC as suspending, save the root port's config space
 * (unless the caller opts out), block further config access, request link
 * power-down (PME_Turn_Off via ELBI), poll for L23_Ready, switch pinctrl
 * to the sleep state, and turn off clocks/regulators.
 *
 * @dev:     root-port pci_dev (may be NULL per the guard below).
 * @user:    unused here; part of the common PM callback signature.
 * @data:    unused here; part of the common PM callback signature.
 * @options: MSM_PCIE_CONFIG_* flags; NO_CFG_RESTORE skips the state save.
 *
 * Returns 0 on success or the pci_save_state() error.
 */
static int msm_pcie_pm_suspend(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret = 0;
	u32 val = 0;
	int ret_l23;
	unsigned long irqsave_flags;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* publish the suspending flag under irq_lock so IRQ paths see it */
	spin_lock_irqsave(&pcie_dev->irq_lock, irqsave_flags);
	pcie_dev->suspending = true;
	spin_unlock_irqrestore(&pcie_dev->irq_lock, irqsave_flags);

	/* nothing to do if power is already off
	 * NOTE(review): suspending is left true on this path — confirm a
	 * later resume is the only way it is cleared.
	 */
	if (!pcie_dev->power_on) {
		PCIE_DBG(pcie_dev,
			"PCIe: power of RC%d has been turned off.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	/* save config space only while the link is confirmed up */
	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
			pcie_dev->conf)) {
		ret = pci_save_state(dev);
		pcie_dev->saved_state =	pci_store_saved_state(dev);
	}
	if (ret) {
		PCIE_ERR(pcie_dev, "PCIe: fail to save state of RC%d:%d.\n",
			pcie_dev->rc_idx, ret);
		pcie_dev->suspending = false;
		return ret;
	}

	/* block config-space access for the duration of the suspend */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = false;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* BIT(4) in ELBI_SYS_CTRL triggers the PME_Turn_Off message */
	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
				BIT(4));

	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
		pcie_dev->rc_idx);

	/* wait up to 100ms (10us poll interval) for L23_Ready (BIT(5)) */
	ret_l23 = readl_poll_timeout((pcie_dev->parf
		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);

	/* check L23_Ready */
	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
		pcie_dev->rc_idx,
		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
	if (!ret_l23)
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
			pcie_dev->rc_idx);

	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_sleep);

	/* power down clocks and regulators; proceeds even without L23 */
	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6286
6287static void msm_pcie_fixup_suspend(struct pci_dev *dev)
6288{
6289 int ret;
6290 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6291
6292 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6293
Tony Truong8ff900c2017-12-20 11:21:03 -08006294 if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED ||
6295 !pci_is_root_bus(dev->bus))
Tony Truong349ee492014-10-01 17:35:56 -07006296 return;
6297
6298 spin_lock_irqsave(&pcie_dev->cfg_lock,
6299 pcie_dev->irqsave_flags);
6300 if (pcie_dev->disable_pc) {
6301 PCIE_DBG(pcie_dev,
6302 "RC%d: Skip suspend because of user request\n",
6303 pcie_dev->rc_idx);
6304 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
6305 pcie_dev->irqsave_flags);
6306 return;
6307 }
6308 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
6309 pcie_dev->irqsave_flags);
6310
6311 mutex_lock(&pcie_dev->recovery_lock);
6312
6313 ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
6314 if (ret)
6315 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
6316 pcie_dev->rc_idx, ret);
6317
6318 mutex_unlock(&pcie_dev->recovery_lock);
6319}
Tony Truong8ff900c2017-12-20 11:21:03 -08006320DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
Tony Truong349ee492014-10-01 17:35:56 -07006321 msm_pcie_fixup_suspend);
6322
/* Resume the PCIe link */
/*
 * Sequence: restore the default pinctrl state, re-enable config access,
 * power the link back up, restore the saved config space (unless the
 * caller opts out), and re-apply bridge configuration if a bridge was
 * found during enumeration.
 *
 * @dev:     root-port pci_dev.
 * @user:    unused here; part of the common PM callback signature.
 * @data:    unused here; part of the common PM callback signature.
 * @options: MSM_PCIE_CONFIG_* flags; NO_CFG_RESTORE skips state restore.
 *
 * Returns 0 on success or the msm_pcie_enable() error.
 */
static int msm_pcie_pm_resume(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_default);

	/* re-allow config-space access before touching the link */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = true;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* bring clocks, regulators, and the link back up */
	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
	if (ret) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d fail to enable PCIe link in resume.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	pcie_dev->suspending = false;
	PCIE_DBG(pcie_dev,
		"dev->bus->number = %d dev->bus->primary = %d\n",
		 dev->bus->number, dev->bus->primary);

	/* restore the config space captured by msm_pcie_pm_suspend() */
	if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCI framework restore state\n",
			pcie_dev->rc_idx);

		pci_load_and_free_saved_state(dev,
				&pcie_dev->saved_state);
		pci_restore_state(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCI framework restore state\n",
			pcie_dev->rc_idx);
	}

	if (pcie_dev->bridge_found) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCIe recover config\n",
			pcie_dev->rc_idx);

		msm_pcie_recover_config(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCIe recover config\n",
			pcie_dev->rc_idx);
	}

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6385
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006386static void msm_pcie_fixup_resume(struct pci_dev *dev)
Tony Truong349ee492014-10-01 17:35:56 -07006387{
6388 int ret;
6389 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6390
6391 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6392
6393 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
Tony Truong8ff900c2017-12-20 11:21:03 -08006394 pcie_dev->user_suspend || !pci_is_root_bus(dev->bus))
Tony Truong349ee492014-10-01 17:35:56 -07006395 return;
6396
6397 mutex_lock(&pcie_dev->recovery_lock);
6398 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6399 if (ret)
6400 PCIE_ERR(pcie_dev,
6401 "PCIe: RC%d got failure in fixup resume:%d.\n",
6402 pcie_dev->rc_idx, ret);
6403
6404 mutex_unlock(&pcie_dev->recovery_lock);
6405}
Tony Truong8ff900c2017-12-20 11:21:03 -08006406DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
Tony Truong349ee492014-10-01 17:35:56 -07006407 msm_pcie_fixup_resume);
6408
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006409static void msm_pcie_fixup_resume_early(struct pci_dev *dev)
Tony Truong349ee492014-10-01 17:35:56 -07006410{
6411 int ret;
6412 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6413
6414 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6415
6416 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
Tony Truong8ff900c2017-12-20 11:21:03 -08006417 pcie_dev->user_suspend || !pci_is_root_bus(dev->bus))
Tony Truong349ee492014-10-01 17:35:56 -07006418 return;
6419
6420 mutex_lock(&pcie_dev->recovery_lock);
6421 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6422 if (ret)
6423 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
6424 pcie_dev->rc_idx, ret);
6425
6426 mutex_unlock(&pcie_dev->recovery_lock);
6427}
Tony Truong8ff900c2017-12-20 11:21:03 -08006428DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
Tony Truong349ee492014-10-01 17:35:56 -07006429 msm_pcie_fixup_resume_early);
6430
/*
 * msm_pcie_pm_control - endpoint-driver entry point for PCIe link power
 * management on an MSM root complex.
 * @pm_opt: requested operation (suspend/resume/disable-PC/enable-PC)
 * @busnr: expected bus number of @user; 0 skips the bus-number sanity check
 * @user: struct pci_dev of the endpoint issuing the request
 * @data: opaque pointer forwarded to the suspend/resume handlers
 * @options: option flags forwarded to the suspend/resume handlers
 *
 * Return: 0 on success; -ENODEV on bad arguments or unsupported @pm_opt;
 * -EPROBE_DEFER if the RC has not finished probing; MSM_PCIE_ERROR on a
 * device-table/bus-number mismatch or a failed suspend/resume.
 */
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
			void *data, u32 options)
{
	int i, ret = 0;
	struct pci_dev *dev;
	u32 rc_idx = 0;
	struct msm_pcie_dev_t *pcie_dev;

	PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
		pm_opt, busnr, options);


	if (!user) {
		pr_err("PCIe: endpoint device is NULL\n");
		ret = -ENODEV;
		goto out;
	}

	/* Map the endpoint back to the root complex that owns its bus. */
	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);

	if (pcie_dev) {
		rc_idx = pcie_dev->rc_idx;
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
			rc_idx, pm_opt, busnr, options);
	} else {
		pr_err(
			"PCIe: did not find RC for pci endpoint device.\n"
			);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * When the caller supplies a bus number, cross-check it against the
	 * BDF recorded for this endpoint in the RC's device table (the bus
	 * number lives in bits 31:24 of the stored BDF).  busnr == 0 skips
	 * the check entirely.
	 */
	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (!busnr)
			break;
		if (user == pcie_dev->pcidev_table[i].dev) {
			if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
				break;

			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: bus number %d does not match with the expected value %d\n",
				pcie_dev->rc_idx, busnr,
				pcie_dev->pcidev_table[i].bdf >> 24);
			ret = MSM_PCIE_ERROR;
			goto out;
		}
	}

	/* The loop fell through without matching the endpoint. */
	if (i == MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: endpoint device was not found in device table",
			pcie_dev->rc_idx);
		ret = MSM_PCIE_ERROR;
		goto out;
	}

	dev = msm_pcie_dev[rc_idx].dev;

	if (!msm_pcie_dev[rc_idx].drv_ready) {
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"RC%d has not been successfully probed yet\n",
			rc_idx);
		return -EPROBE_DEFER;
	}

	switch (pm_opt) {
	case MSM_PCIE_SUSPEND:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to suspend the link\n", rc_idx);
		/* Suspending an already-down link is only worth a debug log. */
		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);

		if (!msm_pcie_dev[rc_idx].power_on) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);
			break;
		}

		/* EP registrations are still outstanding; refuse to suspend. */
		if (msm_pcie_dev[rc_idx].pending_ep_reg) {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: request to suspend the link is rejected\n",
				rc_idx);
			break;
		}

		/* Other endpoints on this RC are still active; not an error. */
		if (pcie_dev->num_active_ep) {
			PCIE_DBG(pcie_dev,
				"RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
				pcie_dev->rc_idx, pcie_dev->num_active_ep);
			return ret;
		}

		/* Set before taking recovery_lock; cleared again on failure. */
		msm_pcie_dev[rc_idx].user_suspend = true;

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);

		ret = msm_pcie_pm_suspend(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to suspend the link.\n",
				rc_idx);
			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
		break;
	case MSM_PCIE_RESUME:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to resume the link\n", rc_idx);
		/* Resume is only legal from the fully-disabled link state. */
		if (msm_pcie_dev[rc_idx].link_status !=
					MSM_PCIE_LINK_DISABLED) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status,
				msm_pcie_dev[rc_idx].num_active_ep);
			break;
		}

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
		ret = msm_pcie_pm_resume(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to resume the link.\n",
				rc_idx);
		} else {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user succeeded to resume the link.\n",
				rc_idx);

			/* Link is back up; clear the user-suspend marker. */
			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);

		break;
	case MSM_PCIE_DISABLE_PC:
		/* Veto power collapse; irqsave spinlock guards the flags. */
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to keep the link always alive.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		if (msm_pcie_dev[rc_idx].suspending) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d Link has been suspended before request\n",
				rc_idx);
			ret = MSM_PCIE_ERROR;
		} else {
			msm_pcie_dev[rc_idx].disable_pc = true;
		}
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	case MSM_PCIE_ENABLE_PC:
		/* Withdraw the power-collapse veto. */
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d cancels the request of alive link.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		msm_pcie_dev[rc_idx].disable_pc = false;
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	default:
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"PCIe: RC%d: unsupported pm operation:%d.\n",
			rc_idx, pm_opt);
		ret = -ENODEV;
		goto out;
	}

out:
	return ret;
}
EXPORT_SYMBOL(msm_pcie_pm_control);
6609
6610int msm_pcie_register_event(struct msm_pcie_register_event *reg)
6611{
6612 int i, ret = 0;
6613 struct msm_pcie_dev_t *pcie_dev;
6614
6615 if (!reg) {
6616 pr_err("PCIe: Event registration is NULL\n");
6617 return -ENODEV;
6618 }
6619
6620 if (!reg->user) {
6621 pr_err("PCIe: User of event registration is NULL\n");
6622 return -ENODEV;
6623 }
6624
6625 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6626
6627 if (!pcie_dev) {
6628 PCIE_ERR(pcie_dev, "%s",
6629 "PCIe: did not find RC for pci endpoint device.\n");
6630 return -ENODEV;
6631 }
6632
6633 if (pcie_dev->num_ep > 1) {
6634 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6635 if (reg->user ==
6636 pcie_dev->pcidev_table[i].dev) {
6637 pcie_dev->event_reg =
6638 pcie_dev->pcidev_table[i].event_reg;
6639
6640 if (!pcie_dev->event_reg) {
6641 pcie_dev->pcidev_table[i].registered =
6642 true;
6643
6644 pcie_dev->num_active_ep++;
6645 PCIE_DBG(pcie_dev,
6646 "PCIe: RC%d: number of active EP(s): %d.\n",
6647 pcie_dev->rc_idx,
6648 pcie_dev->num_active_ep);
6649 }
6650
6651 pcie_dev->event_reg = reg;
6652 pcie_dev->pcidev_table[i].event_reg = reg;
6653 PCIE_DBG(pcie_dev,
6654 "Event 0x%x is registered for RC %d\n",
6655 reg->events,
6656 pcie_dev->rc_idx);
6657
6658 break;
6659 }
6660 }
6661
6662 if (pcie_dev->pending_ep_reg) {
6663 for (i = 0; i < MAX_DEVICE_NUM; i++)
6664 if (!pcie_dev->pcidev_table[i].registered)
6665 break;
6666
6667 if (i == MAX_DEVICE_NUM)
6668 pcie_dev->pending_ep_reg = false;
6669 }
6670 } else {
6671 pcie_dev->event_reg = reg;
6672 PCIE_DBG(pcie_dev,
6673 "Event 0x%x is registered for RC %d\n", reg->events,
6674 pcie_dev->rc_idx);
6675 }
6676
6677 return ret;
6678}
6679EXPORT_SYMBOL(msm_pcie_register_event);
6680
6681int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
6682{
6683 int i, ret = 0;
6684 struct msm_pcie_dev_t *pcie_dev;
6685
6686 if (!reg) {
6687 pr_err("PCIe: Event deregistration is NULL\n");
6688 return -ENODEV;
6689 }
6690
6691 if (!reg->user) {
6692 pr_err("PCIe: User of event deregistration is NULL\n");
6693 return -ENODEV;
6694 }
6695
6696 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6697
6698 if (!pcie_dev) {
6699 PCIE_ERR(pcie_dev, "%s",
6700 "PCIe: did not find RC for pci endpoint device.\n");
6701 return -ENODEV;
6702 }
6703
6704 if (pcie_dev->num_ep > 1) {
6705 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6706 if (reg->user == pcie_dev->pcidev_table[i].dev) {
6707 if (pcie_dev->pcidev_table[i].event_reg) {
6708 pcie_dev->num_active_ep--;
6709 PCIE_DBG(pcie_dev,
6710 "PCIe: RC%d: number of active EP(s) left: %d.\n",
6711 pcie_dev->rc_idx,
6712 pcie_dev->num_active_ep);
6713 }
6714
6715 pcie_dev->event_reg = NULL;
6716 pcie_dev->pcidev_table[i].event_reg = NULL;
6717 PCIE_DBG(pcie_dev,
6718 "Event is deregistered for RC %d\n",
6719 pcie_dev->rc_idx);
6720
6721 break;
6722 }
6723 }
6724 } else {
6725 pcie_dev->event_reg = NULL;
6726 PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
6727 pcie_dev->rc_idx);
6728 }
6729
6730 return ret;
6731}
6732EXPORT_SYMBOL(msm_pcie_deregister_event);
6733
6734int msm_pcie_recover_config(struct pci_dev *dev)
6735{
6736 int ret = 0;
6737 struct msm_pcie_dev_t *pcie_dev;
6738
6739 if (dev) {
6740 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6741 PCIE_DBG(pcie_dev,
6742 "Recovery for the link of RC%d\n", pcie_dev->rc_idx);
6743 } else {
6744 pr_err("PCIe: the input pci dev is NULL.\n");
6745 return -ENODEV;
6746 }
6747
6748 if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
6749 PCIE_DBG(pcie_dev,
6750 "Recover config space of RC%d and its EP\n",
6751 pcie_dev->rc_idx);
6752 pcie_dev->shadow_en = false;
6753 PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
6754 msm_pcie_cfg_recover(pcie_dev, true);
6755 PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
6756 msm_pcie_cfg_recover(pcie_dev, false);
6757 PCIE_DBG(pcie_dev,
6758 "Refreshing the saved config space in PCI framework for RC%d and its EP\n",
6759 pcie_dev->rc_idx);
6760 pci_save_state(pcie_dev->dev);
6761 pci_save_state(dev);
6762 pcie_dev->shadow_en = true;
6763 PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
6764 pcie_dev->rc_idx);
6765 } else {
6766 PCIE_ERR(pcie_dev,
6767 "PCIe: the link of RC%d is not up yet; can't recover config space.\n",
6768 pcie_dev->rc_idx);
6769 ret = -ENODEV;
6770 }
6771
6772 return ret;
6773}
6774EXPORT_SYMBOL(msm_pcie_recover_config);
6775
6776int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
6777{
6778 int ret = 0;
6779 struct msm_pcie_dev_t *pcie_dev;
6780
6781 if (dev) {
6782 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6783 PCIE_DBG(pcie_dev,
6784 "User requests to %s shadow\n",
6785 enable ? "enable" : "disable");
6786 } else {
6787 pr_err("PCIe: the input pci dev is NULL.\n");
6788 return -ENODEV;
6789 }
6790
6791 PCIE_DBG(pcie_dev,
6792 "The shadowing of RC%d is %s enabled currently.\n",
6793 pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not");
6794
6795 pcie_dev->shadow_en = enable;
6796
6797 PCIE_DBG(pcie_dev,
6798 "Shadowing of RC%d is turned %s upon user's request.\n",
6799 pcie_dev->rc_idx, enable ? "on" : "off");
6800
6801 return ret;
6802}
6803EXPORT_SYMBOL(msm_pcie_shadow_control);