blob: 361d7dd060e04e124bbf6bd5ef85fa02c9330f92 [file] [log] [blame]
Tony Truong349ee492014-10-01 17:35:56 -07001/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * MSM PCIe controller driver.
15 */
16
17#include <linux/module.h>
18#include <linux/bitops.h>
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
22#include <linux/gpio.h>
23#include <linux/iopoll.h>
24#include <linux/kernel.h>
25#include <linux/of_pci.h>
26#include <linux/pci.h>
Tony Truong52122a62017-03-23 18:00:34 -070027#include <linux/iommu.h>
Tony Truong349ee492014-10-01 17:35:56 -070028#include <linux/platform_device.h>
29#include <linux/regulator/consumer.h>
Tony Truongb213ac12017-04-05 15:21:20 -070030#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
Tony Truong349ee492014-10-01 17:35:56 -070031#include <linux/slab.h>
32#include <linux/types.h>
33#include <linux/of_gpio.h>
Tony Truongb213ac12017-04-05 15:21:20 -070034#include <linux/clk/qcom.h>
Tony Truong349ee492014-10-01 17:35:56 -070035#include <linux/reset.h>
36#include <linux/msm-bus.h>
37#include <linux/msm-bus-board.h>
38#include <linux/debugfs.h>
39#include <linux/uaccess.h>
40#include <linux/io.h>
41#include <linux/msi.h>
42#include <linux/interrupt.h>
43#include <linux/irq.h>
44#include <linux/irqdomain.h>
45#include <linux/pm_wakeup.h>
46#include <linux/compiler.h>
47#include <soc/qcom/scm.h>
48#include <linux/ipc_logging.h>
49#include <linux/msm_pcie.h>
50
Tony Truongb213ac12017-04-05 15:21:20 -070051#define PCIE_VENDOR_ID_RCP 0x17cb
52#define PCIE_DEVICE_ID_RCP 0x0106
53
54#define PCIE20_L1SUB_CONTROL1 0x1E4
55#define PCIE20_PARF_DBI_BASE_ADDR 0x350
56#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
57
Tony Truongb213ac12017-04-05 15:21:20 -070058#define PCS_BASE 0x800
Tony Truongb213ac12017-04-05 15:21:20 -070059
Tony Truong349ee492014-10-01 17:35:56 -070060#define PCS_PORT(n, m) (PCS_BASE + n * m * 0x1000)
Tony Truong349ee492014-10-01 17:35:56 -070061
62#define PCIE_N_SW_RESET(n, m) (PCS_PORT(n, m) + 0x00)
63#define PCIE_N_POWER_DOWN_CONTROL(n, m) (PCS_PORT(n, m) + 0x04)
Tony Truong349ee492014-10-01 17:35:56 -070064#define PCIE_N_PCS_STATUS(n, m) (PCS_PORT(n, m) + 0x174)
Tony Truong349ee492014-10-01 17:35:56 -070065
66#define PCIE_COM_SW_RESET 0x400
67#define PCIE_COM_POWER_DOWN_CONTROL 0x404
Tony Truong349ee492014-10-01 17:35:56 -070068#define PCIE_COM_PCS_READY_STATUS 0x448
Tony Truong349ee492014-10-01 17:35:56 -070069
70#define PCIE20_PARF_SYS_CTRL 0x00
Tony Truongb213ac12017-04-05 15:21:20 -070071#define PCIE20_PARF_PM_CTRL 0x20
Tony Truong349ee492014-10-01 17:35:56 -070072#define PCIE20_PARF_PM_STTS 0x24
73#define PCIE20_PARF_PCS_DEEMPH 0x34
74#define PCIE20_PARF_PCS_SWING 0x38
75#define PCIE20_PARF_PHY_CTRL 0x40
76#define PCIE20_PARF_PHY_REFCLK 0x4C
77#define PCIE20_PARF_CONFIG_BITS 0x50
78#define PCIE20_PARF_TEST_BUS 0xE4
79#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
80#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8
81#define PCIE20_PARF_LTSSM 0x1B0
82#define PCIE20_PARF_INT_ALL_STATUS 0x224
83#define PCIE20_PARF_INT_ALL_CLEAR 0x228
84#define PCIE20_PARF_INT_ALL_MASK 0x22C
85#define PCIE20_PARF_SID_OFFSET 0x234
86#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
87#define PCIE20_PARF_BDF_TRANSLATE_N 0x250
Tony Truongb213ac12017-04-05 15:21:20 -070088#define PCIE20_PARF_DEVICE_TYPE 0x1000
Tony Truong349ee492014-10-01 17:35:56 -070089
90#define PCIE20_ELBI_VERSION 0x00
91#define PCIE20_ELBI_SYS_CTRL 0x04
92#define PCIE20_ELBI_SYS_STTS 0x08
93
94#define PCIE20_CAP 0x70
95#define PCIE20_CAP_DEVCTRLSTATUS (PCIE20_CAP + 0x08)
96#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
97
98#define PCIE20_COMMAND_STATUS 0x04
99#define PCIE20_HEADER_TYPE 0x0C
100#define PCIE20_BUSNUMBERS 0x18
101#define PCIE20_MEMORY_BASE_LIMIT 0x20
102#define PCIE20_BRIDGE_CTRL 0x3C
103#define PCIE20_DEVICE_CONTROL_STATUS 0x78
104#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
105
106#define PCIE20_AUX_CLK_FREQ_REG 0xB40
107#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C
108#define PCIE20_ACK_N_FTS 0xff00
109
110#define PCIE20_PLR_IATU_VIEWPORT 0x900
111#define PCIE20_PLR_IATU_CTRL1 0x904
112#define PCIE20_PLR_IATU_CTRL2 0x908
113#define PCIE20_PLR_IATU_LBAR 0x90C
114#define PCIE20_PLR_IATU_UBAR 0x910
115#define PCIE20_PLR_IATU_LAR 0x914
116#define PCIE20_PLR_IATU_LTAR 0x918
117#define PCIE20_PLR_IATU_UTAR 0x91c
118
119#define PCIE20_CTRL1_TYPE_CFG0 0x04
120#define PCIE20_CTRL1_TYPE_CFG1 0x05
121
122#define PCIE20_CAP_ID 0x10
123#define L1SUB_CAP_ID 0x1E
124
125#define PCIE_CAP_PTR_OFFSET 0x34
126#define PCIE_EXT_CAP_OFFSET 0x100
127
128#define PCIE20_AER_UNCORR_ERR_STATUS_REG 0x104
129#define PCIE20_AER_CORR_ERR_STATUS_REG 0x110
130#define PCIE20_AER_ROOT_ERR_STATUS_REG 0x130
131#define PCIE20_AER_ERR_SRC_ID_REG 0x134
132
133#define RD 0
134#define WR 1
135#define MSM_PCIE_ERROR -1
136
137#define PERST_PROPAGATION_DELAY_US_MIN 1000
138#define PERST_PROPAGATION_DELAY_US_MAX 1005
139#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
140#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
141#define LINK_UP_TIMEOUT_US_MIN 5000
142#define LINK_UP_TIMEOUT_US_MAX 5100
143#define LINK_UP_CHECK_MAX_COUNT 20
144#define PHY_STABILIZATION_DELAY_US_MIN 995
145#define PHY_STABILIZATION_DELAY_US_MAX 1005
146#define POWER_DOWN_DELAY_US_MIN 10
147#define POWER_DOWN_DELAY_US_MAX 11
148#define LINKDOWN_INIT_WAITING_US_MIN 995
149#define LINKDOWN_INIT_WAITING_US_MAX 1005
150#define LINKDOWN_WAITING_US_MIN 4900
151#define LINKDOWN_WAITING_US_MAX 5100
152#define LINKDOWN_WAITING_COUNT 200
153
154#define PHY_READY_TIMEOUT_COUNT 10
155#define XMLH_LINK_UP 0x400
156#define MAX_LINK_RETRIES 5
157#define MAX_BUS_NUM 3
158#define MAX_PROP_SIZE 32
159#define MAX_RC_NAME_LEN 15
160#define MSM_PCIE_MAX_VREG 4
Tony Truongb213ac12017-04-05 15:21:20 -0700161#define MSM_PCIE_MAX_CLK 12
Tony Truong349ee492014-10-01 17:35:56 -0700162#define MSM_PCIE_MAX_PIPE_CLK 1
163#define MAX_RC_NUM 3
164#define MAX_DEVICE_NUM 20
165#define MAX_SHORT_BDF_NUM 16
166#define PCIE_TLP_RD_SIZE 0x5
167#define PCIE_MSI_NR_IRQS 256
168#define MSM_PCIE_MAX_MSI 32
169#define MAX_MSG_LEN 80
170#define PCIE_LOG_PAGES (50)
171#define PCIE_CONF_SPACE_DW 1024
172#define PCIE_CLEAR 0xDEADBEEF
173#define PCIE_LINK_DOWN 0xFFFFFFFF
174
Tony Truongb213ac12017-04-05 15:21:20 -0700175#define MSM_PCIE_MAX_RESET 5
Tony Truong349ee492014-10-01 17:35:56 -0700176#define MSM_PCIE_MAX_PIPE_RESET 1
177
178#define MSM_PCIE_MSI_PHY 0xa0000000
179#define PCIE20_MSI_CTRL_ADDR (0x820)
180#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
181#define PCIE20_MSI_CTRL_INTR_EN (0x828)
182#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
183#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
184#define PCIE20_MSI_CTRL_MAX 8
185
186/* PM control options */
187#define PM_IRQ 0x1
188#define PM_CLK 0x2
189#define PM_GPIO 0x4
190#define PM_VREG 0x8
191#define PM_PIPE_CLK 0x10
192#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)
193
194#ifdef CONFIG_PHYS_ADDR_T_64BIT
195#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
196#else
197#define PCIE_UPPER_ADDR(addr) (0x0)
198#endif
199#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
200
201/* Config Space Offsets */
202#define BDF_OFFSET(bus, devfn) \
203 ((bus << 24) | (devfn << 16))
204
205#define PCIE_GEN_DBG(x...) do { \
206 if (msm_pcie_debug_mask) \
207 pr_alert(x); \
208 } while (0)
209
210#define PCIE_DBG(dev, fmt, arg...) do { \
211 if ((dev) && (dev)->ipc_log_long) \
212 ipc_log_string((dev)->ipc_log_long, \
213 "DBG1:%s: " fmt, __func__, arg); \
214 if ((dev) && (dev)->ipc_log) \
215 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
216 if (msm_pcie_debug_mask) \
217 pr_alert("%s: " fmt, __func__, arg); \
218 } while (0)
219
220#define PCIE_DBG2(dev, fmt, arg...) do { \
221 if ((dev) && (dev)->ipc_log) \
222 ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
223 if (msm_pcie_debug_mask) \
224 pr_alert("%s: " fmt, __func__, arg); \
225 } while (0)
226
227#define PCIE_DBG3(dev, fmt, arg...) do { \
228 if ((dev) && (dev)->ipc_log) \
229 ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
230 if (msm_pcie_debug_mask) \
231 pr_alert("%s: " fmt, __func__, arg); \
232 } while (0)
233
234#define PCIE_DUMP(dev, fmt, arg...) do { \
235 if ((dev) && (dev)->ipc_log_dump) \
236 ipc_log_string((dev)->ipc_log_dump, \
237 "DUMP:%s: " fmt, __func__, arg); \
238 } while (0)
239
240#define PCIE_DBG_FS(dev, fmt, arg...) do { \
241 if ((dev) && (dev)->ipc_log_dump) \
242 ipc_log_string((dev)->ipc_log_dump, \
243 "DBG_FS:%s: " fmt, __func__, arg); \
244 pr_alert("%s: " fmt, __func__, arg); \
245 } while (0)
246
247#define PCIE_INFO(dev, fmt, arg...) do { \
248 if ((dev) && (dev)->ipc_log_long) \
249 ipc_log_string((dev)->ipc_log_long, \
250 "INFO:%s: " fmt, __func__, arg); \
251 if ((dev) && (dev)->ipc_log) \
252 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
253 pr_info("%s: " fmt, __func__, arg); \
254 } while (0)
255
256#define PCIE_ERR(dev, fmt, arg...) do { \
257 if ((dev) && (dev)->ipc_log_long) \
258 ipc_log_string((dev)->ipc_log_long, \
259 "ERR:%s: " fmt, __func__, arg); \
260 if ((dev) && (dev)->ipc_log) \
261 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
262 pr_err("%s: " fmt, __func__, arg); \
263 } while (0)
264
265
266enum msm_pcie_res {
267 MSM_PCIE_RES_PARF,
268 MSM_PCIE_RES_PHY,
269 MSM_PCIE_RES_DM_CORE,
270 MSM_PCIE_RES_ELBI,
271 MSM_PCIE_RES_CONF,
272 MSM_PCIE_RES_IO,
273 MSM_PCIE_RES_BARS,
274 MSM_PCIE_RES_TCSR,
275 MSM_PCIE_MAX_RES,
276};
277
278enum msm_pcie_irq {
279 MSM_PCIE_INT_MSI,
280 MSM_PCIE_INT_A,
281 MSM_PCIE_INT_B,
282 MSM_PCIE_INT_C,
283 MSM_PCIE_INT_D,
284 MSM_PCIE_INT_PLS_PME,
285 MSM_PCIE_INT_PME_LEGACY,
286 MSM_PCIE_INT_PLS_ERR,
287 MSM_PCIE_INT_AER_LEGACY,
288 MSM_PCIE_INT_LINK_UP,
289 MSM_PCIE_INT_LINK_DOWN,
290 MSM_PCIE_INT_BRIDGE_FLUSH_N,
291 MSM_PCIE_INT_GLOBAL_INT,
292 MSM_PCIE_MAX_IRQ,
293};
294
295enum msm_pcie_irq_event {
296 MSM_PCIE_INT_EVT_LINK_DOWN = 1,
297 MSM_PCIE_INT_EVT_BME,
298 MSM_PCIE_INT_EVT_PM_TURNOFF,
299 MSM_PCIE_INT_EVT_DEBUG,
300 MSM_PCIE_INT_EVT_LTR,
301 MSM_PCIE_INT_EVT_MHI_Q6,
302 MSM_PCIE_INT_EVT_MHI_A7,
303 MSM_PCIE_INT_EVT_DSTATE_CHANGE,
304 MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
305 MSM_PCIE_INT_EVT_MMIO_WRITE,
306 MSM_PCIE_INT_EVT_CFG_WRITE,
307 MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
308 MSM_PCIE_INT_EVT_LINK_UP,
309 MSM_PCIE_INT_EVT_AER_LEGACY,
310 MSM_PCIE_INT_EVT_AER_ERR,
311 MSM_PCIE_INT_EVT_PME_LEGACY,
312 MSM_PCIE_INT_EVT_PLS_PME,
313 MSM_PCIE_INT_EVT_INTD,
314 MSM_PCIE_INT_EVT_INTC,
315 MSM_PCIE_INT_EVT_INTB,
316 MSM_PCIE_INT_EVT_INTA,
317 MSM_PCIE_INT_EVT_EDMA,
318 MSM_PCIE_INT_EVT_MSI_0,
319 MSM_PCIE_INT_EVT_MSI_1,
320 MSM_PCIE_INT_EVT_MSI_2,
321 MSM_PCIE_INT_EVT_MSI_3,
322 MSM_PCIE_INT_EVT_MSI_4,
323 MSM_PCIE_INT_EVT_MSI_5,
324 MSM_PCIE_INT_EVT_MSI_6,
325 MSM_PCIE_INT_EVT_MSI_7,
326 MSM_PCIE_INT_EVT_MAX = 30,
327};
328
329enum msm_pcie_gpio {
330 MSM_PCIE_GPIO_PERST,
331 MSM_PCIE_GPIO_WAKE,
332 MSM_PCIE_GPIO_EP,
333 MSM_PCIE_MAX_GPIO
334};
335
336enum msm_pcie_link_status {
337 MSM_PCIE_LINK_DEINIT,
338 MSM_PCIE_LINK_ENABLED,
339 MSM_PCIE_LINK_DISABLED
340};
341
Tony Truong9f2c7722017-02-28 15:02:27 -0800342enum msm_pcie_boot_option {
343 MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0),
344 MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)
345};
346
/* gpio info structure */
struct msm_pcie_gpio_info_t {
	char *name;		/* lookup name (matches DT property strings below) */
	uint32_t num;		/* GPIO number once resolved */
	bool out;		/* true if the pin is driven as an output */
	uint32_t on;		/* value to drive when asserting the pin */
	uint32_t init;		/* initial value driven during setup */
	bool required;		/* NOTE(review): presumably mandatory at probe — confirm */
};
356
/* voltage regulator info structure */
struct msm_pcie_vreg_info_t {
	struct regulator *hdl;	/* regulator handle */
	char *name;		/* supply name (e.g. "vreg-1.8") */
	uint32_t max_v;		/* maximum voltage in uV (0 = unconstrained) */
	uint32_t min_v;		/* minimum voltage in uV (0 = unconstrained) */
	uint32_t opt_mode;	/* load current hint in uA (0 = none) */
	bool required;		/* NOTE(review): presumably mandatory at probe — confirm */
};
366
367/* reset info structure */
368struct msm_pcie_reset_info_t {
369 struct reset_control *hdl;
370 char *name;
371 bool required;
372};
373
/* clock info structure */
struct msm_pcie_clk_info_t {
	struct clk *hdl;	/* clock handle */
	char *name;		/* clock consumer name */
	u32 freq;		/* rate to set in Hz (0 = leave default) */
	bool config_mem;	/* configure memory retention via clk_set_flags() */
	bool required;		/* NOTE(review): presumably mandatory at probe — confirm */
};
382
383/* resource info structure */
384struct msm_pcie_res_info_t {
385 char *name;
386 struct resource *resource;
387 void __iomem *base;
388};
389
/* irq info structure */
391struct msm_pcie_irq_info_t {
392 char *name;
393 uint32_t num;
394};
395
/* phy info structure: one step of a PHY register init sequence */
struct msm_pcie_phy_info_t {
	u32 offset;	/* register offset within the PHY block */
	u32 val;	/* value to write */
	u32 delay;	/* post-write delay in us (0 = none) */
};
402
/* PCIe device info structure: per-endpoint bookkeeping for a root complex */
struct msm_pcie_device_info {
	u32 bdf;				/* bus/device/function, packed as BDF_OFFSET() */
	struct pci_dev *dev;			/* PCI core device handle */
	short short_bdf;			/* compact BDF index (see MAX_SHORT_BDF_NUM) */
	u32 sid;				/* SMMU stream ID */
	int domain;				/* PCI domain (RC index) */
	void __iomem *conf_base;		/* mapped config space of this device */
	unsigned long phy_address;		/* physical address of config space */
	u32 dev_ctrlstts_offset;		/* offset of Device Control/Status register */
	struct msm_pcie_register_event *event_reg;	/* client event registration */
	bool registered;			/* client registration state */
};
416
417/* msm pcie device structure */
418struct msm_pcie_dev_t {
419 struct platform_device *pdev;
420 struct pci_dev *dev;
421 struct regulator *gdsc;
422 struct regulator *gdsc_smmu;
423 struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
424 struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
425 struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK];
426 struct msm_pcie_clk_info_t pipeclk[MSM_PCIE_MAX_PIPE_CLK];
427 struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
428 struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
429 struct msm_pcie_irq_info_t msi[MSM_PCIE_MAX_MSI];
430 struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
431 struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];
432
433 void __iomem *parf;
434 void __iomem *phy;
435 void __iomem *elbi;
436 void __iomem *dm_core;
437 void __iomem *conf;
438 void __iomem *bars;
439 void __iomem *tcsr;
440
441 uint32_t axi_bar_start;
442 uint32_t axi_bar_end;
443
444 struct resource *dev_mem_res;
445 struct resource *dev_io_res;
446
447 uint32_t wake_n;
448 uint32_t vreg_n;
449 uint32_t gpio_n;
450 uint32_t parf_deemph;
451 uint32_t parf_swing;
452
453 bool cfg_access;
454 spinlock_t cfg_lock;
455 unsigned long irqsave_flags;
456 struct mutex enumerate_lock;
457 struct mutex setup_lock;
458
459 struct irq_domain *irq_domain;
460 DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
461 uint32_t msi_gicm_addr;
462 uint32_t msi_gicm_base;
463 bool use_msi;
464
465 enum msm_pcie_link_status link_status;
466 bool user_suspend;
467 bool disable_pc;
468 struct pci_saved_state *saved_state;
469
470 struct wakeup_source ws;
471 struct msm_bus_scale_pdata *bus_scale_table;
472 uint32_t bus_client;
473
474 bool l0s_supported;
475 bool l1_supported;
476 bool l1ss_supported;
477 bool common_clk_en;
478 bool clk_power_manage_en;
479 bool aux_clk_sync;
480 bool aer_enable;
481 bool smmu_exist;
482 uint32_t smmu_sid_base;
483 uint32_t n_fts;
484 bool ext_ref_clk;
485 bool common_phy;
486 uint32_t ep_latency;
487 uint32_t wr_halt_size;
488 uint32_t cpl_timeout;
489 uint32_t current_bdf;
Tony Truong349ee492014-10-01 17:35:56 -0700490 uint32_t perst_delay_us_min;
491 uint32_t perst_delay_us_max;
492 uint32_t tlp_rd_size;
493 bool linkdown_panic;
Tony Truong9f2c7722017-02-28 15:02:27 -0800494 uint32_t boot_option;
Tony Truong349ee492014-10-01 17:35:56 -0700495
496 uint32_t rc_idx;
497 uint32_t phy_ver;
498 bool drv_ready;
499 bool enumerated;
500 struct work_struct handle_wake_work;
501 struct mutex recovery_lock;
502 spinlock_t linkdown_lock;
503 spinlock_t wakeup_lock;
504 spinlock_t global_irq_lock;
505 spinlock_t aer_lock;
506 ulong linkdown_counter;
507 ulong link_turned_on_counter;
508 ulong link_turned_off_counter;
509 ulong rc_corr_counter;
510 ulong rc_non_fatal_counter;
511 ulong rc_fatal_counter;
512 ulong ep_corr_counter;
513 ulong ep_non_fatal_counter;
514 ulong ep_fatal_counter;
515 bool suspending;
516 ulong wake_counter;
517 u32 num_active_ep;
518 u32 num_ep;
519 bool pending_ep_reg;
520 u32 phy_len;
521 u32 port_phy_len;
522 struct msm_pcie_phy_info_t *phy_sequence;
523 struct msm_pcie_phy_info_t *port_phy_sequence;
524 u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
525 u32 rc_shadow[PCIE_CONF_SPACE_DW];
526 bool shadow_en;
527 bool bridge_found;
528 struct msm_pcie_register_event *event_reg;
529 unsigned int scm_dev_id;
530 bool power_on;
531 void *ipc_log;
532 void *ipc_log_long;
533 void *ipc_log_dump;
534 bool use_19p2mhz_aux_clk;
535 bool use_pinctrl;
536 struct pinctrl *pinctrl;
537 struct pinctrl_state *pins_default;
538 struct pinctrl_state *pins_sleep;
539 struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM];
540};
541
542
543/* debug mask sys interface */
544static int msm_pcie_debug_mask;
545module_param_named(debug_mask, msm_pcie_debug_mask,
546 int, 0644);
547
548/* debugfs values */
549static u32 rc_sel;
550static u32 base_sel;
551static u32 wr_offset;
552static u32 wr_mask;
553static u32 wr_value;
554static ulong corr_counter_limit = 5;
555
556/* counter to keep track if common PHY needs to be configured */
557static u32 num_rc_on;
558
559/* global lock for PCIe common PHY */
560static struct mutex com_phy_lock;
561
562/* Table to track info of PCIe devices */
563static struct msm_pcie_device_info
564 msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];
565
566/* PCIe driver state */
Stephen Boydb5b8fc32017-06-21 08:59:11 -0700567static struct pcie_drv_sta {
Tony Truong349ee492014-10-01 17:35:56 -0700568 u32 rc_num;
569 struct mutex drv_lock;
570} pcie_drv;
571
572/* msm pcie device data */
573static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
574
575/* regulators */
576static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
577 {NULL, "vreg-3.3", 0, 0, 0, false},
578 {NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
579 {NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
580 {NULL, "vreg-cx", 0, 0, 0, false}
581};
582
583/* GPIOs */
584static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
585 {"perst-gpio", 0, 1, 0, 0, 1},
586 {"wake-gpio", 0, 0, 0, 0, 0},
587 {"qcom,ep-gpio", 0, 1, 1, 0, 0}
588};
589
590/* resets */
591static struct msm_pcie_reset_info_t
592msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
593 {
Tony Truongb213ac12017-04-05 15:21:20 -0700594 {NULL, "pcie_0_core_reset", false},
Tony Truong349ee492014-10-01 17:35:56 -0700595 {NULL, "pcie_phy_reset", false},
596 {NULL, "pcie_phy_com_reset", false},
597 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
598 {NULL, "pcie_0_phy_reset", false}
599 },
600 {
Tony Truongb213ac12017-04-05 15:21:20 -0700601 {NULL, "pcie_1_core_reset", false},
Tony Truong349ee492014-10-01 17:35:56 -0700602 {NULL, "pcie_phy_reset", false},
603 {NULL, "pcie_phy_com_reset", false},
604 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
605 {NULL, "pcie_1_phy_reset", false}
606 },
607 {
Tony Truongb213ac12017-04-05 15:21:20 -0700608 {NULL, "pcie_2_core_reset", false},
Tony Truong349ee492014-10-01 17:35:56 -0700609 {NULL, "pcie_phy_reset", false},
610 {NULL, "pcie_phy_com_reset", false},
611 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
612 {NULL, "pcie_2_phy_reset", false}
613 }
614};
615
616/* pipe reset */
617static struct msm_pcie_reset_info_t
618msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
619 {
620 {NULL, "pcie_0_phy_pipe_reset", false}
621 },
622 {
623 {NULL, "pcie_1_phy_pipe_reset", false}
624 },
625 {
626 {NULL, "pcie_2_phy_pipe_reset", false}
627 }
628};
629
630/* clocks */
631static struct msm_pcie_clk_info_t
632 msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
633 {
634 {NULL, "pcie_0_ref_clk_src", 0, false, false},
635 {NULL, "pcie_0_aux_clk", 1010000, false, true},
636 {NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
637 {NULL, "pcie_0_mstr_axi_clk", 0, true, true},
638 {NULL, "pcie_0_slv_axi_clk", 0, true, true},
639 {NULL, "pcie_0_ldo", 0, false, true},
640 {NULL, "pcie_0_smmu_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700641 {NULL, "pcie_0_slv_q2a_axi_clk", 0, false, false},
642 {NULL, "pcie_phy_refgen_clk", 0, false, false},
643 {NULL, "pcie_tbu_clk", 0, false, false},
Tony Truong349ee492014-10-01 17:35:56 -0700644 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
645 {NULL, "pcie_phy_aux_clk", 0, false, false}
646 },
647 {
648 {NULL, "pcie_1_ref_clk_src", 0, false, false},
649 {NULL, "pcie_1_aux_clk", 1010000, false, true},
650 {NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
651 {NULL, "pcie_1_mstr_axi_clk", 0, true, true},
652 {NULL, "pcie_1_slv_axi_clk", 0, true, true},
653 {NULL, "pcie_1_ldo", 0, false, true},
654 {NULL, "pcie_1_smmu_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700655 {NULL, "pcie_1_slv_q2a_axi_clk", 0, false, false},
656 {NULL, "pcie_phy_refgen_clk", 0, false, false},
657 {NULL, "pcie_tbu_clk", 0, false, false},
Tony Truong349ee492014-10-01 17:35:56 -0700658 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
659 {NULL, "pcie_phy_aux_clk", 0, false, false}
660 },
661 {
662 {NULL, "pcie_2_ref_clk_src", 0, false, false},
663 {NULL, "pcie_2_aux_clk", 1010000, false, true},
664 {NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
665 {NULL, "pcie_2_mstr_axi_clk", 0, true, true},
666 {NULL, "pcie_2_slv_axi_clk", 0, true, true},
667 {NULL, "pcie_2_ldo", 0, false, true},
668 {NULL, "pcie_2_smmu_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700669 {NULL, "pcie_2_slv_q2a_axi_clk", 0, false, false},
670 {NULL, "pcie_phy_refgen_clk", 0, false, false},
671 {NULL, "pcie_tbu_clk", 0, false, false},
Tony Truong349ee492014-10-01 17:35:56 -0700672 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
673 {NULL, "pcie_phy_aux_clk", 0, false, false}
674 }
675};
676
677/* Pipe Clocks */
678static struct msm_pcie_clk_info_t
679 msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
680 {
681 {NULL, "pcie_0_pipe_clk", 125000000, true, true},
682 },
683 {
684 {NULL, "pcie_1_pipe_clk", 125000000, true, true},
685 },
686 {
687 {NULL, "pcie_2_pipe_clk", 125000000, true, true},
688 }
689};
690
691/* resources */
692static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
Stephen Boydb5b8fc32017-06-21 08:59:11 -0700693 {"parf", NULL, NULL},
694 {"phy", NULL, NULL},
695 {"dm_core", NULL, NULL},
696 {"elbi", NULL, NULL},
697 {"conf", NULL, NULL},
698 {"io", NULL, NULL},
699 {"bars", NULL, NULL},
700 {"tcsr", NULL, NULL}
Tony Truong349ee492014-10-01 17:35:56 -0700701};
702
703/* irqs */
704static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
705 {"int_msi", 0},
706 {"int_a", 0},
707 {"int_b", 0},
708 {"int_c", 0},
709 {"int_d", 0},
710 {"int_pls_pme", 0},
711 {"int_pme_legacy", 0},
712 {"int_pls_err", 0},
713 {"int_aer_legacy", 0},
714 {"int_pls_link_up", 0},
715 {"int_pls_link_down", 0},
716 {"int_bridge_flush_n", 0},
717 {"int_global_int", 0}
718};
719
720/* MSIs */
721static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
722 {"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
723 {"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
724 {"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
725 {"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
726 {"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
727 {"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
728 {"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
729 {"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
730};
731
Tony Truong7772e692017-04-13 17:03:34 -0700732static int msm_pcie_config_device(struct pci_dev *dev, void *pdev);
733
Tony Truong349ee492014-10-01 17:35:56 -0700734#ifdef CONFIG_ARM
735#define PCIE_BUS_PRIV_DATA(bus) \
736 (((struct pci_sys_data *)bus->sysdata)->private_data)
737
738static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];
739
/*
 * msm_pcie_setup_sys_data - build the per-RC pci_sys_data (ARM builds).
 * @dev: PCIe root-complex device
 *
 * Publishes the RC index as the PCI domain and stashes the driver handle
 * so PCIE_BUS_PRIV_DATA() can recover it from bus->sysdata.
 *
 * Return: pointer to the RC's pci_sys_data entry.
 */
static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
	msm_pcie_sys_data[dev->rc_idx].private_data = dev;

	return &msm_pcie_sys_data[dev->rc_idx];
}
747
/*
 * msm_pcie_fixup_irqs - map legacy INTx IRQs for all devices (ARM builds).
 * @dev: PCIe root-complex device (unused; mapping is bus-wide)
 */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
}
752#else
753#define PCIE_BUS_PRIV_DATA(bus) \
754 (struct msm_pcie_dev_t *)(bus->sysdata)
755
/*
 * msm_pcie_setup_sys_data - non-ARM variant: bus->sysdata is the driver
 * handle itself (see the non-ARM PCIE_BUS_PRIV_DATA()).
 */
static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	return dev;
}
760
/* msm_pcie_fixup_irqs - non-ARM variant: no legacy IRQ fixup needed. */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
}
764#endif
765
/*
 * msm_pcie_write_reg - write a 32-bit value to a PCIe register.
 * @base: mapped base of the register block
 * @offset: byte offset from @base
 * @value: value to write
 *
 * Relaxed write followed by a write barrier so the access has reached
 * the device before the caller proceeds.
 */
static inline void msm_pcie_write_reg(void __iomem *base, u32 offset, u32 value)
{
	writel_relaxed(value, base + offset);
	/* ensure that changes propagated to the hardware */
	wmb();
}
772
Stephen Boydb5b8fc32017-06-21 08:59:11 -0700773static inline void msm_pcie_write_reg_field(void __iomem *base, u32 offset,
Tony Truong349ee492014-10-01 17:35:56 -0700774 const u32 mask, u32 val)
775{
776 u32 shift = find_first_bit((void *)&mask, 32);
777 u32 tmp = readl_relaxed(base + offset);
778
779 tmp &= ~mask; /* clear written bits */
780 val = tmp | (val << shift);
781 writel_relaxed(val, base + offset);
782 /* ensure that changes propagated to the hardware */
783 wmb();
784}
785
/*
 * msm_pcie_config_clock_mem - configure memory retention for a PCIe clock.
 * @dev: PCIe root-complex device (used for logging only)
 * @info: clock to configure
 *
 * Sets the CLKFLAG_NORETAIN_MEM and CLKFLAG_NORETAIN_PERIPH flags on the
 * clock (i.e. requests that its core/peripheral memories not be retained).
 * Failures are logged but deliberately not propagated.
 */
static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
	struct msm_pcie_clk_info_t *info)
{
	int ret;

	ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
	if (ret)
		PCIE_ERR(dev,
			"PCIe: RC%d can't configure core memory for clk %s: %d.\n",
			dev->rc_idx, info->name, ret);
	else
		PCIE_DBG2(dev,
			"PCIe: RC%d configured core memory for clk %s.\n",
			dev->rc_idx, info->name);

	ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
	if (ret)
		PCIE_ERR(dev,
			"PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
			dev->rc_idx, info->name, ret);
	else
		PCIE_DBG2(dev,
			"PCIe: RC%d configured peripheral memory for clk %s.\n",
			dev->rc_idx, info->name);
}
811
/*
 * pcie_phy_dump - hex-dump the PHY register space to the IPC dump log.
 * @dev: PCIe root-complex device
 *
 * Logs 32 bytes (eight 32-bit registers) per line.  Note: each iteration
 * reads up to @i + 28, so a PHY region whose size is not a multiple of 32
 * would be read slightly past its end.
 */
static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;

	size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->phy + i),
			readl_relaxed(dev->phy + (i + 4)),
			readl_relaxed(dev->phy + (i + 8)),
			readl_relaxed(dev->phy + (i + 12)),
			readl_relaxed(dev->phy + (i + 16)),
			readl_relaxed(dev->phy + (i + 20)),
			readl_relaxed(dev->phy + (i + 24)),
			readl_relaxed(dev->phy + (i + 28)));
	}
}
831
Tony Truong349ee492014-10-01 17:35:56 -0700832static void pcie_phy_init(struct msm_pcie_dev_t *dev)
833{
834 int i;
835 struct msm_pcie_phy_info_t *phy_seq;
836
837 PCIE_DBG(dev,
838 "RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
839 dev->rc_idx);
840
841 if (dev->phy_sequence) {
842 i = dev->phy_len;
843 phy_seq = dev->phy_sequence;
844 while (i--) {
845 msm_pcie_write_reg(dev->phy,
846 phy_seq->offset,
847 phy_seq->val);
848 if (phy_seq->delay)
849 usleep_range(phy_seq->delay,
850 phy_seq->delay + 1);
851 phy_seq++;
852 }
Tony Truong349ee492014-10-01 17:35:56 -0700853 }
854}
855
856static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
857{
858 int i;
859 struct msm_pcie_phy_info_t *phy_seq;
Tony Truong349ee492014-10-01 17:35:56 -0700860
861 PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);
862
Tony Truong349ee492014-10-01 17:35:56 -0700863 if (dev->port_phy_sequence) {
864 i = dev->port_phy_len;
865 phy_seq = dev->port_phy_sequence;
866 while (i--) {
867 msm_pcie_write_reg(dev->phy,
868 phy_seq->offset,
869 phy_seq->val);
870 if (phy_seq->delay)
871 usleep_range(phy_seq->delay,
872 phy_seq->delay + 1);
873 phy_seq++;
874 }
Tony Truong349ee492014-10-01 17:35:56 -0700875 }
876
Tony Truong349ee492014-10-01 17:35:56 -0700877}
878
879static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
880{
881 if (dev->phy_ver >= 0x20) {
882 if (readl_relaxed(dev->phy +
883 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) &
884 BIT(6))
885 return false;
886 else
887 return true;
888 }
889
890 if (!(readl_relaxed(dev->phy + PCIE_COM_PCS_READY_STATUS) & 0x1))
891 return false;
892 else
893 return true;
894}
Tony Truong349ee492014-10-01 17:35:56 -0700895
896static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
897{
898 int ret, scm_ret;
899
900 if (!dev) {
901 pr_err("PCIe: the input pcie dev is NULL.\n");
902 return -ENODEV;
903 }
904
905 ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
906 if (ret || scm_ret) {
907 PCIE_ERR(dev,
908 "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
909 dev->rc_idx, ret, scm_ret);
910 return ret ? ret : -EINVAL;
911 }
912
913 return 0;
914}
915
916static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
917 u32 offset)
918{
919 if (offset % 4) {
920 PCIE_ERR(dev,
921 "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
922 dev->rc_idx, offset);
923 return MSM_PCIE_ERROR;
924 }
925
926 return 0;
927}
928
/*
 * msm_pcie_confirm_linkup - verify the PCIe link is genuinely usable.
 * @dev: PCIe root-complex device
 * @check_sw_stts: also require the driver's link_status to be
 *	MSM_PCIE_LINK_ENABLED
 * @check_ep: also probe the endpoint's config space
 * @ep_conf: mapped endpoint config space (used only when @check_ep)
 *
 * Checks the link-up indication (dm_core + 0x80, BIT(29)), then reads the
 * vendor/device ID word from the RC (and optionally the EP); an all-ones
 * read (PCIE_LINK_DOWN) means the config space is not actually reachable.
 *
 * Return: true if every requested check passes.
 */
static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
	bool check_sw_stts,
	bool check_ep,
	void __iomem *ep_conf)
{
	u32 val;

	if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
			dev->rc_idx);
		return false;
	}

	if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
			dev->rc_idx);
		return false;
	}

	val = readl_relaxed(dev->dm_core);
	PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
		dev->rc_idx, val);
	if (val == PCIE_LINK_DOWN) {
		PCIE_ERR(dev,
			"PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
			dev->rc_idx, dev->rc_idx, val);
		return false;
	}

	if (check_ep) {
		val = readl_relaxed(ep_conf);
		PCIE_DBG(dev,
			"PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
			dev->rc_idx, val);
		if (val == PCIE_LINK_DOWN) {
			PCIE_ERR(dev,
				"PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
				dev->rc_idx, dev->rc_idx, val);
			return false;
		}
	}

	return true;
}
973
/*
 * msm_pcie_cfg_recover - restore config space from the driver's shadow copy.
 * @dev: PCIe root-complex device
 * @rc: true to restore the root complex itself, false to restore the
 *	endpoints recorded in pcidev_table
 *
 * Walks the shadow config-space words in reverse (high offset first) and
 * writes back every word that differs from the PCIE_CLEAR sentinel.  For
 * endpoints the link is confirmed first and the restored state is re-saved
 * with pci_save_state().
 *
 * NOTE(review): cfg advances by SZ_4K per table entry — assumes each EP's
 * config space is a consecutive 4K window from dev->conf; confirm against
 * the enumeration code.
 */
static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
{
	int i, j;
	u32 val = 0;
	u32 *shadow;
	void __iomem *cfg = dev->conf;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		/* EP table is packed; a zero BDF marks the end of it. */
		if (!rc && !dev->pcidev_table[i].bdf)
			break;
		if (rc) {
			cfg = dev->dm_core;
			shadow = dev->rc_shadow;
		} else {
			if (!msm_pcie_confirm_linkup(dev, false, true,
				dev->pcidev_table[i].conf_base))
				continue;

			shadow = dev->ep_shadow[i];
			PCIE_DBG(dev,
				"PCIe Device: %02x:%02x.%01x\n",
				dev->pcidev_table[i].bdf >> 24,
				dev->pcidev_table[i].bdf >> 19 & 0x1f,
				dev->pcidev_table[i].bdf >> 16 & 0x07);
		}
		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
			val = shadow[j];
			/* PCIE_CLEAR marks words that were never shadowed. */
			if (val != PCIE_CLEAR) {
				PCIE_DBG3(dev,
					"PCIe: before recovery:cfg 0x%x:0x%x\n",
					j * 4, readl_relaxed(cfg + j * 4));
				PCIE_DBG3(dev,
					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
					j, j * 4, val);
				writel_relaxed(val, cfg + j * 4);
				/* ensure changes propagated to the hardware */
				wmb();
				PCIE_DBG3(dev,
					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
					j * 4, readl_relaxed(cfg + j * 4));
			}
		}
		if (rc)
			break;

		pci_save_state(dev->pcidev_table[i].dev);
		cfg += SZ_4K;
	}
}
1023
1024static void msm_pcie_write_mask(void __iomem *addr,
1025 uint32_t clear_mask, uint32_t set_mask)
1026{
1027 uint32_t val;
1028
1029 val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
1030 writel_relaxed(val, addr);
1031 wmb(); /* ensure data is written to hardware register */
1032}
1033
1034static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
1035{
1036 int i, size;
1037 u32 original;
1038
1039 PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);
1040
1041 original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
1042 for (i = 1; i <= 0x1A; i++) {
1043 msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
1044 0xFF0000, i << 16);
1045 PCIE_DUMP(dev,
1046 "RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
1047 dev->rc_idx,
1048 readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
1049 readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
1050 }
1051 writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);
1052
1053 PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);
1054
1055 size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
1056 for (i = 0; i < size; i += 32) {
1057 PCIE_DUMP(dev,
1058 "RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
1059 dev->rc_idx, i,
1060 readl_relaxed(dev->parf + i),
1061 readl_relaxed(dev->parf + (i + 4)),
1062 readl_relaxed(dev->parf + (i + 8)),
1063 readl_relaxed(dev->parf + (i + 12)),
1064 readl_relaxed(dev->parf + (i + 16)),
1065 readl_relaxed(dev->parf + (i + 20)),
1066 readl_relaxed(dev->parf + (i + 24)),
1067 readl_relaxed(dev->parf + (i + 28)));
1068 }
1069}
1070
/*
 * msm_pcie_show_status() - log a snapshot of the RC's configuration,
 * feature flags, debugfs-tunable options and event counters.
 * @dev: PCIe root-complex device to report on
 *
 * Pure output helper (debugfs testcase 0); reads driver state only, no
 * hardware access.
 */
static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
		dev->rc_idx, dev->enumerated ? "" : "not");
	PCIE_DBG_FS(dev, "PCIe: link is %s\n",
		(dev->link_status == MSM_PCIE_LINK_ENABLED)
		? "enabled" : "disabled");
	PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
		dev->cfg_access ? "" : "not");
	PCIE_DBG_FS(dev, "use_msi is %d\n",
		dev->use_msi);
	PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
		dev->use_pinctrl);
	PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
		dev->use_19p2mhz_aux_clk);
	PCIE_DBG_FS(dev, "user_suspend is %d\n",
		dev->user_suspend);
	PCIE_DBG_FS(dev, "num_ep: %d\n",
		dev->num_ep);
	PCIE_DBG_FS(dev, "num_active_ep: %d\n",
		dev->num_active_ep);
	PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
		dev->pending_ep_reg ? "true" : "false");
	/* NOTE(review): the next three format strings lack a trailing \n,
	 * unlike every other line here — confirm whether intentional. */
	PCIE_DBG_FS(dev, "phy_len is %d",
		dev->phy_len);
	PCIE_DBG_FS(dev, "port_phy_len is %d",
		dev->port_phy_len);
	PCIE_DBG_FS(dev, "disable_pc is %d",
		dev->disable_pc);
	PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
		dev->l0s_supported ? "" : "not");
	PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
		dev->l1_supported ? "" : "not");
	PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
		dev->l1ss_supported ? "" : "not");
	PCIE_DBG_FS(dev, "common_clk_en is %d\n",
		dev->common_clk_en);
	PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
		dev->clk_power_manage_en);
	PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
		dev->aux_clk_sync);
	PCIE_DBG_FS(dev, "AER is %s enable\n",
		dev->aer_enable ? "" : "not");
	PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
		dev->ext_ref_clk);
	PCIE_DBG_FS(dev, "boot_option is 0x%x\n",
		dev->boot_option);
	PCIE_DBG_FS(dev, "phy_ver is %d\n",
		dev->phy_ver);
	PCIE_DBG_FS(dev, "drv_ready is %d\n",
		dev->drv_ready);
	PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
		dev->linkdown_panic);
	PCIE_DBG_FS(dev, "the link is %s suspending\n",
		dev->suspending ? "" : "not");
	PCIE_DBG_FS(dev, "shadow is %s enabled\n",
		dev->shadow_en ? "" : "not");
	PCIE_DBG_FS(dev, "the power of RC is %s on\n",
		dev->power_on ? "" : "not");
	PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
		dev->msi_gicm_addr);
	PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
		dev->msi_gicm_base);
	PCIE_DBG_FS(dev, "bus_client: %d\n",
		dev->bus_client);
	PCIE_DBG_FS(dev, "smmu does %s exist\n",
		dev->smmu_exist ? "" : "not");
	PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
		dev->smmu_sid_base);
	PCIE_DBG_FS(dev, "n_fts: %d\n",
		dev->n_fts);
	PCIE_DBG_FS(dev, "common_phy: %d\n",
		dev->common_phy);
	PCIE_DBG_FS(dev, "ep_latency: %dms\n",
		dev->ep_latency);
	PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
		dev->wr_halt_size);
	PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
		dev->cpl_timeout);
	PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
		dev->current_bdf);
	PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
		dev->perst_delay_us_min);
	PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
		dev->perst_delay_us_max);
	PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
		dev->tlp_rd_size);
	PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
		dev->rc_corr_counter);
	PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
		dev->rc_non_fatal_counter);
	PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
		dev->rc_fatal_counter);
	PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
		dev->ep_corr_counter);
	PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
		dev->ep_non_fatal_counter);
	PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
		dev->ep_fatal_counter);
	PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
		dev->linkdown_counter);
	PCIE_DBG_FS(dev, "wake_counter: %lu\n",
		dev->wake_counter);
	PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
		dev->link_turned_on_counter);
	PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
		dev->link_turned_off_counter);
}
1179
1180static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
1181{
1182 int i, j;
1183 u32 val = 0;
1184 u32 *shadow;
1185
1186 for (i = 0; i < MAX_DEVICE_NUM; i++) {
1187 if (!rc && !dev->pcidev_table[i].bdf)
1188 break;
1189 if (rc) {
1190 shadow = dev->rc_shadow;
1191 } else {
1192 shadow = dev->ep_shadow[i];
1193 PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
1194 dev->pcidev_table[i].bdf >> 24,
1195 dev->pcidev_table[i].bdf >> 19 & 0x1f,
1196 dev->pcidev_table[i].bdf >> 16 & 0x07);
1197 }
1198 for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
1199 val = shadow[j];
1200 if (val != PCIE_CLEAR) {
1201 PCIE_DBG_FS(dev,
1202 "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
1203 j, j * 4, val);
1204 }
1205 }
1206 if (rc)
1207 break;
1208 }
1209}
1210
/*
 * msm_pcie_sel_debug_testcase() - execute one debugfs-selected testcase
 * against the given root complex.
 * @dev:      PCIe root-complex device under test
 * @testcase: test number; see the switch cases below (0..13)
 *
 * Testcases 12 and 13 additionally consume the module-level debugfs knobs
 * base_sel, wr_offset, wr_mask and wr_value.
 */
static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
					u32 testcase)
{
	int ret, i;
	u32 base_sel_size = 0;
	u32 val = 0;
	u32 current_offset = 0;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	/*
	 * Testcases 5-10 need the EP's PCIe capability: walk the EP's
	 * standard capability list (starting at PCIE_CAP_PTR_OFFSET) to
	 * find PCIE20_CAP_ID, and derive the Link Control/Status (+0x10)
	 * and Device Control 2/Status 2 (+0x28) register offsets from it.
	 */
	if (testcase >= 5 && testcase <= 10) {
		current_offset =
			readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xff) == PCIE20_CAP_ID) {
				ep_link_ctrlstts_offset = current_offset +
								0x10;
				ep_dev_ctrl2stts2_offset = current_offset +
								0x28;
				break;
			}
			current_offset = (val >> 8) & 0xff;
		}

		if (!ep_link_ctrlstts_offset)
			PCIE_DBG(dev,
				"RC%d endpoint does not support PCIe capability registers\n",
				dev->rc_idx);
		else
			PCIE_DBG(dev,
				"RC%d: ep_link_ctrlstts_offset: 0x%x\n",
				dev->rc_idx, ep_link_ctrlstts_offset);
	}

	switch (testcase) {
	case 0: /* output status */
		PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
			dev->rc_idx);
		msm_pcie_show_status(dev);
		break;
	case 1: /* disable link */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
				__func__);
		break;
	case 2: /* enable link and recover config space for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: enable link and recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 3: /*
		 * disable and enable link, recover config space for
		 * RC and EP
		 */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 4: /* dump shadow registers for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping RC shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, true);

		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping EP shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, false);
		break;
	case 5: /* disable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
			dev->rc_idx);
		/* clear BIT(0) of Link Control on both RC and EP.
		 * NOTE(review): if the capability walk above failed,
		 * ep_link_ctrlstts_offset is 0 and the EP write lands at
		 * config offset 0 — confirm this is acceptable. */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(0), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(0), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 6: /* enable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(0));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 7: /* disable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
			dev->rc_idx);
		/* clear BIT(1) of Link Control on both RC and EP */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(1), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(1), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 8: /* enable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
			dev->rc_idx);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(1));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 9: /* disable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
			dev->rc_idx);
		/* walk the EP's extended capability list for the L1
		 * substates capability; control register 1 is at +0x8 */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* clear the four L1ss enable bits and BIT(10) (LTR enable)
		 * on both RC and EP */
		msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_L1SUB_CONTROL1,
			0xf, 0);
		msm_pcie_write_mask(dev->dm_core +
			PCIE20_DEVICE_CONTROL2_STATUS2,
			BIT(10), 0);
		msm_pcie_write_reg_field(dev->conf,
			ep_l1sub_ctrl1_offset,
			0xf, 0);
		msm_pcie_write_mask(dev->conf +
			ep_dev_ctrl2stts2_offset,
			BIT(10), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 10: /* enable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
			dev->rc_idx);
		/* locate L1 substates capability: capability register 1 is
		 * at +0x4, control register 1 at +0x8 */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset =
						current_offset + 0x4;
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		val = readl_relaxed(dev->conf +
				ep_l1sub_cap_reg1_offset);

		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
			val);
		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* enable only the substates the EP advertises support for */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_L1SUB_CONTROL1,
			0xf, val);
		msm_pcie_write_mask(dev->dm_core +
			PCIE20_DEVICE_CONTROL2_STATUS2,
			0, BIT(10));
		msm_pcie_write_reg_field(dev->conf,
			ep_l1sub_ctrl1_offset,
			0xf, val);
		msm_pcie_write_mask(dev->conf +
			ep_dev_ctrl2stts2_offset,
			0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 11: /* enumerate PCIe */
		PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
			dev->rc_idx);
		if (dev->enumerated)
			PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
				dev->rc_idx);
		else {
			if (!msm_pcie_enumerate(dev->rc_idx))
				PCIE_DBG_FS(dev,
					"PCIe: RC%d is successfully enumerated\n",
					dev->rc_idx);
			else
				PCIE_DBG_FS(dev,
					"PCIe: RC%d enumeration failed\n",
					dev->rc_idx);
		}
		break;
	case 12: /* write a value to a register */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: writing a value to a register\n\n",
			dev->rc_idx);

		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		}

		PCIE_DBG_FS(dev,
			"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
			dev->res[base_sel - 1].name,
			dev->res[base_sel - 1].base,
			wr_offset, wr_mask, wr_value);

		base_sel_size = resource_size(dev->res[base_sel - 1].resource);

		/* reject offsets past the region or misaligned per
		 * msm_pcie_check_align() */
		if (wr_offset >  base_sel_size - 4 ||
			msm_pcie_check_align(dev, wr_offset))
			PCIE_DBG_FS(dev,
				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
				dev->rc_idx, wr_offset, base_sel_size - 4);
		else
			msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
				wr_offset, wr_mask, wr_value);

		break;
	case 13: /* dump all registers of base_sel */
		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
			pcie_parf_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
			pcie_phy_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
			/* limit config-space dump to the first 4KB */
			base_sel_size = 0x1000;
		} else {
			base_sel_size = resource_size(
				dev->res[base_sel - 1].resource);
		}

		PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
			dev->res[base_sel - 1].name, dev->rc_idx);

		/* eight 32-bit registers per output line */
		for (i = 0; i < base_sel_size; i += 32) {
			PCIE_DBG_FS(dev,
			"0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			i, readl_relaxed(dev->res[base_sel - 1].base + i),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
		}
		break;
	default:
		PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
		break;
	}
}
1632
1633int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
1634 u32 offset, u32 mask, u32 value)
1635{
1636 int ret = 0;
1637 struct msm_pcie_dev_t *pdev = NULL;
1638
1639 if (!dev) {
1640 pr_err("PCIe: the input pci dev is NULL.\n");
1641 return -ENODEV;
1642 }
1643
1644 if (option == 12 || option == 13) {
1645 if (!base || base > 5) {
1646 PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
1647 PCIE_DBG_FS(pdev,
1648 "PCIe: base_sel is still 0x%x\n", base_sel);
1649 return -EINVAL;
1650 }
1651
1652 base_sel = base;
1653 PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
1654
1655 if (option == 12) {
1656 wr_offset = offset;
1657 wr_mask = mask;
1658 wr_value = value;
1659
1660 PCIE_DBG_FS(pdev,
1661 "PCIe: wr_offset is now 0x%x\n", wr_offset);
1662 PCIE_DBG_FS(pdev,
1663 "PCIe: wr_mask is now 0x%x\n", wr_mask);
1664 PCIE_DBG_FS(pdev,
1665 "PCIe: wr_value is now 0x%x\n", wr_value);
1666 }
1667 }
1668
1669 pdev = PCIE_BUS_PRIV_DATA(dev->bus);
1670 rc_sel = 1 << pdev->rc_idx;
1671
1672 msm_pcie_sel_debug_testcase(pdev, option);
1673
1674 return ret;
1675}
1676EXPORT_SYMBOL(msm_pcie_debug_info);
1677
Tony Truongbd9a3412017-02-27 18:30:13 -08001678#ifdef CONFIG_SYSFS
1679static ssize_t msm_pcie_enumerate_store(struct device *dev,
1680 struct device_attribute *attr,
1681 const char *buf, size_t count)
1682{
1683 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
1684 dev_get_drvdata(dev);
1685
1686 if (pcie_dev)
1687 msm_pcie_enumerate(pcie_dev->rc_idx);
1688
1689 return count;
1690}
1691
1692static DEVICE_ATTR(enumerate, 0200, NULL, msm_pcie_enumerate_store);
1693
1694static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
1695{
1696 int ret;
1697
1698 ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
1699 if (ret)
1700 PCIE_DBG_FS(dev,
1701 "RC%d: failed to create sysfs enumerate node\n",
1702 dev->rc_idx);
1703}
1704
1705static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
1706{
1707 if (dev->pdev)
1708 device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
1709}
1710#else
1711static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
1712{
1713}
1714
1715static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
1716{
1717}
1718#endif
1719
Tony Truong349ee492014-10-01 17:35:56 -07001720#ifdef CONFIG_DEBUG_FS
1721static struct dentry *dent_msm_pcie;
1722static struct dentry *dfile_rc_sel;
1723static struct dentry *dfile_case;
1724static struct dentry *dfile_base_sel;
1725static struct dentry *dfile_linkdown_panic;
1726static struct dentry *dfile_wr_offset;
1727static struct dentry *dfile_wr_mask;
1728static struct dentry *dfile_wr_value;
Tony Truong9f2c7722017-02-28 15:02:27 -08001729static struct dentry *dfile_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07001730static struct dentry *dfile_aer_enable;
1731static struct dentry *dfile_corr_counter_limit;
1732
1733static u32 rc_sel_max;
1734
1735static ssize_t msm_pcie_cmd_debug(struct file *file,
1736 const char __user *buf,
1737 size_t count, loff_t *ppos)
1738{
1739 unsigned long ret;
1740 char str[MAX_MSG_LEN];
1741 unsigned int testcase = 0;
1742 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08001743 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001744
Tony Truongfdbd5672017-01-06 16:23:14 -08001745 memset(str, 0, size);
1746 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001747 if (ret)
1748 return -EFAULT;
1749
Tony Truongfdbd5672017-01-06 16:23:14 -08001750 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001751 testcase = (testcase * 10) + (str[i] - '0');
1752
1753 if (!rc_sel)
1754 rc_sel = 1;
1755
1756 pr_alert("PCIe: TEST: %d\n", testcase);
1757
1758 for (i = 0; i < MAX_RC_NUM; i++) {
1759 if (!((rc_sel >> i) & 0x1))
1760 continue;
1761 msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
1762 }
1763
1764 return count;
1765}
1766
Stephen Boydb5b8fc32017-06-21 08:59:11 -07001767static const struct file_operations msm_pcie_cmd_debug_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07001768 .write = msm_pcie_cmd_debug,
1769};
1770
1771static ssize_t msm_pcie_set_rc_sel(struct file *file,
1772 const char __user *buf,
1773 size_t count, loff_t *ppos)
1774{
1775 unsigned long ret;
1776 char str[MAX_MSG_LEN];
1777 int i;
1778 u32 new_rc_sel = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08001779 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001780
Tony Truongfdbd5672017-01-06 16:23:14 -08001781 memset(str, 0, size);
1782 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001783 if (ret)
1784 return -EFAULT;
1785
Tony Truongfdbd5672017-01-06 16:23:14 -08001786 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001787 new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
1788
1789 if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
1790 pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
1791 pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
1792 } else {
1793 rc_sel = new_rc_sel;
1794 pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
1795 }
1796
1797 pr_alert("PCIe: the following RC(s) will be tested:\n");
1798 for (i = 0; i < MAX_RC_NUM; i++) {
1799 if (!rc_sel) {
1800 pr_alert("RC %d\n", i);
1801 break;
1802 } else if (rc_sel & (1 << i)) {
1803 pr_alert("RC %d\n", i);
1804 }
1805 }
1806
1807 return count;
1808}
1809
Stephen Boydb5b8fc32017-06-21 08:59:11 -07001810static const struct file_operations msm_pcie_rc_sel_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07001811 .write = msm_pcie_set_rc_sel,
1812};
1813
1814static ssize_t msm_pcie_set_base_sel(struct file *file,
1815 const char __user *buf,
1816 size_t count, loff_t *ppos)
1817{
1818 unsigned long ret;
1819 char str[MAX_MSG_LEN];
1820 int i;
1821 u32 new_base_sel = 0;
1822 char *base_sel_name;
Tony Truongfdbd5672017-01-06 16:23:14 -08001823 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001824
Tony Truongfdbd5672017-01-06 16:23:14 -08001825 memset(str, 0, size);
1826 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001827 if (ret)
1828 return -EFAULT;
1829
Tony Truongfdbd5672017-01-06 16:23:14 -08001830 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001831 new_base_sel = (new_base_sel * 10) + (str[i] - '0');
1832
1833 if (!new_base_sel || new_base_sel > 5) {
1834 pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
1835 new_base_sel);
1836 pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
1837 } else {
1838 base_sel = new_base_sel;
1839 pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
1840 }
1841
1842 switch (base_sel) {
1843 case 1:
1844 base_sel_name = "PARF";
1845 break;
1846 case 2:
1847 base_sel_name = "PHY";
1848 break;
1849 case 3:
1850 base_sel_name = "RC CONFIG SPACE";
1851 break;
1852 case 4:
1853 base_sel_name = "ELBI";
1854 break;
1855 case 5:
1856 base_sel_name = "EP CONFIG SPACE";
1857 break;
1858 default:
1859 base_sel_name = "INVALID";
1860 break;
1861 }
1862
1863 pr_alert("%s\n", base_sel_name);
1864
1865 return count;
1866}
1867
Stephen Boydb5b8fc32017-06-21 08:59:11 -07001868static const struct file_operations msm_pcie_base_sel_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07001869 .write = msm_pcie_set_base_sel,
1870};
1871
1872static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
1873 const char __user *buf,
1874 size_t count, loff_t *ppos)
1875{
1876 unsigned long ret;
1877 char str[MAX_MSG_LEN];
1878 u32 new_linkdown_panic = 0;
1879 int i;
1880
1881 memset(str, 0, sizeof(str));
1882 ret = copy_from_user(str, buf, sizeof(str));
1883 if (ret)
1884 return -EFAULT;
1885
1886 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
1887 new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
1888
1889 if (new_linkdown_panic <= 1) {
1890 for (i = 0; i < MAX_RC_NUM; i++) {
1891 if (!rc_sel) {
1892 msm_pcie_dev[0].linkdown_panic =
1893 new_linkdown_panic;
1894 PCIE_DBG_FS(&msm_pcie_dev[0],
1895 "PCIe: RC0: linkdown_panic is now %d\n",
1896 msm_pcie_dev[0].linkdown_panic);
1897 break;
1898 } else if (rc_sel & (1 << i)) {
1899 msm_pcie_dev[i].linkdown_panic =
1900 new_linkdown_panic;
1901 PCIE_DBG_FS(&msm_pcie_dev[i],
1902 "PCIe: RC%d: linkdown_panic is now %d\n",
1903 i, msm_pcie_dev[i].linkdown_panic);
1904 }
1905 }
1906 } else {
1907 pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
1908 new_linkdown_panic);
1909 }
1910
1911 return count;
1912}
1913
Stephen Boydb5b8fc32017-06-21 08:59:11 -07001914static const struct file_operations msm_pcie_linkdown_panic_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07001915 .write = msm_pcie_set_linkdown_panic,
1916};
1917
1918static ssize_t msm_pcie_set_wr_offset(struct file *file,
1919 const char __user *buf,
1920 size_t count, loff_t *ppos)
1921{
1922 unsigned long ret;
1923 char str[MAX_MSG_LEN];
1924 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08001925 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001926
Tony Truongfdbd5672017-01-06 16:23:14 -08001927 memset(str, 0, size);
1928 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001929 if (ret)
1930 return -EFAULT;
1931
1932 wr_offset = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08001933 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001934 wr_offset = (wr_offset * 10) + (str[i] - '0');
1935
1936 pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
1937
1938 return count;
1939}
1940
Stephen Boydb5b8fc32017-06-21 08:59:11 -07001941static const struct file_operations msm_pcie_wr_offset_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07001942 .write = msm_pcie_set_wr_offset,
1943};
1944
1945static ssize_t msm_pcie_set_wr_mask(struct file *file,
1946 const char __user *buf,
1947 size_t count, loff_t *ppos)
1948{
1949 unsigned long ret;
1950 char str[MAX_MSG_LEN];
1951 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08001952 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001953
Tony Truongfdbd5672017-01-06 16:23:14 -08001954 memset(str, 0, size);
1955 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001956 if (ret)
1957 return -EFAULT;
1958
1959 wr_mask = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08001960 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001961 wr_mask = (wr_mask * 10) + (str[i] - '0');
1962
1963 pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
1964
1965 return count;
1966}
1967
Stephen Boydb5b8fc32017-06-21 08:59:11 -07001968static const struct file_operations msm_pcie_wr_mask_ops = {
Tony Truong349ee492014-10-01 17:35:56 -07001969 .write = msm_pcie_set_wr_mask,
1970};
1971static ssize_t msm_pcie_set_wr_value(struct file *file,
1972 const char __user *buf,
1973 size_t count, loff_t *ppos)
1974{
1975 unsigned long ret;
1976 char str[MAX_MSG_LEN];
1977 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08001978 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001979
Tony Truongfdbd5672017-01-06 16:23:14 -08001980 memset(str, 0, size);
1981 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001982 if (ret)
1983 return -EFAULT;
1984
1985 wr_value = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08001986 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001987 wr_value = (wr_value * 10) + (str[i] - '0');
1988
1989 pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
1990
1991 return count;
1992}
1993
/* debugfs "wr_value": write-only; accepts a decimal value to write */
static const struct file_operations msm_pcie_wr_value_ops = {
	.write = msm_pcie_set_wr_value,
};
1997
Tony Truong9f2c7722017-02-28 15:02:27 -08001998static ssize_t msm_pcie_set_boot_option(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07001999 const char __user *buf,
2000 size_t count, loff_t *ppos)
2001{
2002 unsigned long ret;
2003 char str[MAX_MSG_LEN];
Tony Truong9f2c7722017-02-28 15:02:27 -08002004 u32 new_boot_option = 0;
Tony Truong349ee492014-10-01 17:35:56 -07002005 int i;
2006
2007 memset(str, 0, sizeof(str));
2008 ret = copy_from_user(str, buf, sizeof(str));
2009 if (ret)
2010 return -EFAULT;
2011
2012 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong9f2c7722017-02-28 15:02:27 -08002013 new_boot_option = (new_boot_option * 10) + (str[i] - '0');
Tony Truong349ee492014-10-01 17:35:56 -07002014
Tony Truong9f2c7722017-02-28 15:02:27 -08002015 if (new_boot_option <= 1) {
Tony Truong349ee492014-10-01 17:35:56 -07002016 for (i = 0; i < MAX_RC_NUM; i++) {
2017 if (!rc_sel) {
Tony Truong9f2c7722017-02-28 15:02:27 -08002018 msm_pcie_dev[0].boot_option = new_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002019 PCIE_DBG_FS(&msm_pcie_dev[0],
Tony Truong9f2c7722017-02-28 15:02:27 -08002020 "PCIe: RC0: boot_option is now 0x%x\n",
2021 msm_pcie_dev[0].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002022 break;
2023 } else if (rc_sel & (1 << i)) {
Tony Truong9f2c7722017-02-28 15:02:27 -08002024 msm_pcie_dev[i].boot_option = new_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002025 PCIE_DBG_FS(&msm_pcie_dev[i],
Tony Truong9f2c7722017-02-28 15:02:27 -08002026 "PCIe: RC%d: boot_option is now 0x%x\n",
2027 i, msm_pcie_dev[i].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002028 }
2029 }
2030 } else {
Tony Truong9f2c7722017-02-28 15:02:27 -08002031 pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
2032 new_boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002033 }
2034
2035 return count;
2036}
2037
/* debugfs "boot_option": write-only; accepts 0 or 1 */
static const struct file_operations msm_pcie_boot_option_ops = {
	.write = msm_pcie_set_boot_option,
};
2041
2042static ssize_t msm_pcie_set_aer_enable(struct file *file,
2043 const char __user *buf,
2044 size_t count, loff_t *ppos)
2045{
2046 unsigned long ret;
2047 char str[MAX_MSG_LEN];
2048 u32 new_aer_enable = 0;
2049 u32 temp_rc_sel;
2050 int i;
2051
2052 memset(str, 0, sizeof(str));
2053 ret = copy_from_user(str, buf, sizeof(str));
2054 if (ret)
2055 return -EFAULT;
2056
2057 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2058 new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
2059
2060 if (new_aer_enable > 1) {
2061 pr_err(
2062 "PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
2063 new_aer_enable);
2064 return count;
2065 }
2066
2067 if (rc_sel)
2068 temp_rc_sel = rc_sel;
2069 else
2070 temp_rc_sel = 0x1;
2071
2072 for (i = 0; i < MAX_RC_NUM; i++) {
2073 if (temp_rc_sel & (1 << i)) {
2074 msm_pcie_dev[i].aer_enable = new_aer_enable;
2075 PCIE_DBG_FS(&msm_pcie_dev[i],
2076 "PCIe: RC%d: aer_enable is now %d\n",
2077 i, msm_pcie_dev[i].aer_enable);
2078
2079 msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
2080 PCIE20_BRIDGE_CTRL,
2081 new_aer_enable ? 0 : BIT(16),
2082 new_aer_enable ? BIT(16) : 0);
2083
2084 PCIE_DBG_FS(&msm_pcie_dev[i],
2085 "RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
2086 readl_relaxed(msm_pcie_dev[i].dm_core +
2087 PCIE20_BRIDGE_CTRL));
2088 }
2089 }
2090
2091 return count;
2092}
2093
/* debugfs "aer_enable": write-only; accepts 0 or 1 */
static const struct file_operations msm_pcie_aer_enable_ops = {
	.write = msm_pcie_set_aer_enable,
};
2097
2098static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
2099 const char __user *buf,
2100 size_t count, loff_t *ppos)
2101{
2102 unsigned long ret;
2103 char str[MAX_MSG_LEN];
2104 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002105 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002106
Tony Truongfdbd5672017-01-06 16:23:14 -08002107 memset(str, 0, size);
2108 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002109 if (ret)
2110 return -EFAULT;
2111
2112 corr_counter_limit = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002113 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002114 corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
2115
2116 pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
2117
2118 return count;
2119}
2120
/* debugfs "corr_counter_limit": write-only; accepts a decimal limit */
static const struct file_operations msm_pcie_corr_counter_limit_ops = {
	.write = msm_pcie_set_corr_counter_limit,
};
2124
/*
 * Create the "pci-msm" debugfs directory and its control files.
 *
 * On any file-creation failure, the goto ladder below unwinds every
 * file created so far (in reverse order) and finally removes the
 * directory itself, so a partial init leaves nothing behind.
 *
 * NOTE(review): only IS_ERR() is checked for debugfs_create_dir();
 * depending on kernel version a NULL return may also indicate
 * failure — confirm against the target kernel's debugfs behavior.
 */
static void msm_pcie_debugfs_init(void)
{
	/* one selection bit per possible root complex */
	rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
	/* default: debug writes affect all 32 bits */
	wr_mask = 0xffffffff;

	dent_msm_pcie = debugfs_create_dir("pci-msm", NULL);
	if (IS_ERR(dent_msm_pcie)) {
		pr_err("PCIe: fail to create the folder for debug_fs.\n");
		return;
	}

	dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_rc_sel_ops);
	if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
		pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
		goto rc_sel_error;
	}

	dfile_case = debugfs_create_file("case", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_cmd_debug_ops);
	if (!dfile_case || IS_ERR(dfile_case)) {
		pr_err("PCIe: fail to create the file for debug_fs case.\n");
		goto case_error;
	}

	dfile_base_sel = debugfs_create_file("base_sel", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_base_sel_ops);
	if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
		pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
		goto base_sel_error;
	}

	dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
					dent_msm_pcie, NULL,
					&msm_pcie_linkdown_panic_ops);
	if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
		pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
		goto linkdown_panic_error;
	}

	dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_wr_offset_ops);
	if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
		goto wr_offset_error;
	}

	dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_wr_mask_ops);
	if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
		goto wr_mask_error;
	}

	dfile_wr_value = debugfs_create_file("wr_value", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_wr_value_ops);
	if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
		goto wr_value_error;
	}

	dfile_boot_option = debugfs_create_file("boot_option", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_boot_option_ops);
	if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
		pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
		goto boot_option_error;
	}

	dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_aer_enable_ops);
	if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
		pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
		goto aer_enable_error;
	}

	dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
					0664, dent_msm_pcie, NULL,
					&msm_pcie_corr_counter_limit_ops);
	if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
		pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
		goto corr_counter_limit_error;
	}
	return;

	/* unwind in reverse creation order */
corr_counter_limit_error:
	debugfs_remove(dfile_aer_enable);
aer_enable_error:
	debugfs_remove(dfile_boot_option);
boot_option_error:
	debugfs_remove(dfile_wr_value);
wr_value_error:
	debugfs_remove(dfile_wr_mask);
wr_mask_error:
	debugfs_remove(dfile_wr_offset);
wr_offset_error:
	debugfs_remove(dfile_linkdown_panic);
linkdown_panic_error:
	debugfs_remove(dfile_base_sel);
base_sel_error:
	debugfs_remove(dfile_case);
case_error:
	debugfs_remove(dfile_rc_sel);
rc_sel_error:
	debugfs_remove(dent_msm_pcie);
}
2238
2239static void msm_pcie_debugfs_exit(void)
2240{
2241 debugfs_remove(dfile_rc_sel);
2242 debugfs_remove(dfile_case);
2243 debugfs_remove(dfile_base_sel);
2244 debugfs_remove(dfile_linkdown_panic);
2245 debugfs_remove(dfile_wr_offset);
2246 debugfs_remove(dfile_wr_mask);
2247 debugfs_remove(dfile_wr_value);
Tony Truong9f2c7722017-02-28 15:02:27 -08002248 debugfs_remove(dfile_boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002249 debugfs_remove(dfile_aer_enable);
2250 debugfs_remove(dfile_corr_counter_limit);
2251}
2252#else
/* debugfs support compiled out: no-op stub */
static void msm_pcie_debugfs_init(void)
{
}
2256
/* debugfs support compiled out: no-op stub */
static void msm_pcie_debugfs_exit(void)
{
}
2260#endif
2261
/*
 * Return non-zero when the link is up, as reported by the RC's link
 * control/status register. BIT(29) of the 32-bit word at
 * PCIE20_CAP_LINKCTRLSTATUS — presumably the Data Link Layer Link
 * Active status bit; confirm against the register map.
 */
static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
{
	return readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
}
2267
2268/**
2269 * msm_pcie_iatu_config - configure outbound address translation region
 * @dev: root complex
2271 * @nr: region number
2272 * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
2273 * @host_addr: - region start address on host
2274 * @host_end: - region end address (low 32 bit) on host,
2275 * upper 32 bits are same as for @host_addr
2276 * @target_addr: - region start address on target
2277 */
static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
				unsigned long host_addr, u32 host_end,
				unsigned long target_addr)
{
	void __iomem *pcie20 = dev->dm_core;

	/*
	 * Mirror the iATU programming into the RC register shadow so it
	 * can be replayed after a power collapse.
	 */
	if (dev->shadow_en) {
		dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
			nr;
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
			type;
		dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
			lower_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
			upper_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
			host_end;
		dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
			lower_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
			upper_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
			BIT(31);
	}

	/* select region */
	writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
	/* ensure that hardware locks it */
	wmb();

	/* switch off region before changing it */
	writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
	/* and wait till it propagates to the hardware */
	wmb();

	/* program type, host window [LBAR/UBAR..LAR] and target base */
	writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
	writel_relaxed(lower_32_bits(host_addr),
		       pcie20 + PCIE20_PLR_IATU_LBAR);
	writel_relaxed(upper_32_bits(host_addr),
		       pcie20 + PCIE20_PLR_IATU_UBAR);
	writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
	writel_relaxed(lower_32_bits(target_addr),
		       pcie20 + PCIE20_PLR_IATU_LTAR);
	writel_relaxed(upper_32_bits(target_addr),
		       pcie20 + PCIE20_PLR_IATU_UTAR);
	/* ensure that changes propagated to the hardware */
	wmb();
	/* re-enable the region (BIT(31) = region enable) */
	writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);

	/* ensure that changes propagated to the hardware */
	wmb();

	/* after enumeration, nr indexes the per-endpoint device table */
	if (dev->enumerated) {
		PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
			dev->pcidev_table[nr].bdf >> 24,
			dev->pcidev_table[nr].bdf >> 19 & 0x1f,
			dev->pcidev_table[nr].bdf >> 16 & 0x07);
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
	}
}
2353
2354/**
2355 * msm_pcie_cfg_bdf - configure for config access
 * @dev: root complex
2357 * @bus: PCI bus number
2358 * @devfn: PCI dev and function number
2359 *
2360 * Remap if required region 0 for config access of proper type
2361 * (CFG0 for bus 1, CFG1 for other buses)
2362 * Cache current device bdf for speed-up
2363 */
2364static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
2365{
2366 struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
2367 u32 bdf = BDF_OFFSET(bus, devfn);
2368 u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
2369
2370 if (dev->current_bdf == bdf)
2371 return;
2372
2373 msm_pcie_iatu_config(dev, 0, type,
2374 axi_conf->start,
2375 axi_conf->start + SZ_4K - 1,
2376 bdf);
2377
2378 dev->current_bdf = bdf;
2379}
2380
/*
 * Record a config-space write in the software shadow so it can be
 * replayed after link recovery / power collapse.
 *
 * RC writes go straight into rc_shadow. Endpoint writes are stored per
 * device: the device table is searched for @bdf, allocating the first
 * free slot (bdf == 0) on a miss — both in this RC's table and in the
 * global msm_pcie_dev_tbl. A device landing in slot > 0 implies more
 * than one function below the RC, so bridge_found is set.
 */
static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
					u32 word_offset, u32 wr_val,
					u32 bdf, bool rc)
{
	int i, j;
	u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;

	if (rc) {
		dev->rc_shadow[word_offset / 4] = wr_val;
	} else {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (!dev->pcidev_table[i].bdf) {
				/* claim a slot in the global table too */
				for (j = 0; j < max_dev; j++)
					if (!msm_pcie_dev_tbl[j].bdf) {
						msm_pcie_dev_tbl[j].bdf = bdf;
						break;
					}
				dev->pcidev_table[i].bdf = bdf;
				if ((!dev->bridge_found) && (i > 0))
					dev->bridge_found = true;
			}
			if (dev->pcidev_table[i].bdf == bdf) {
				dev->ep_shadow[i][word_offset / 4] = wr_val;
				break;
			}
		}
	}
}
2409
/*
 * Common worker for config-space reads and writes (@oper is RD or WR).
 *
 * Validates that config access is currently allowed (cfg_access flag,
 * link state, RC devfn 0 only), maps the target function's config
 * space, then performs a read-modify-write on the containing 32-bit
 * word for sub-word accesses. Writes are mirrored into the register
 * shadow when shadow_en is set. Returns PCIBIOS_* codes; on any
 * failure *val is set to all-ones, as PCI config readers expect.
 *
 * Runs under dev->cfg_lock with IRQs disabled for the whole access.
 */
static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
				     int where, int size, u32 *val)
{
	uint32_t word_offset, byte_offset, mask;
	uint32_t rd_val, wr_val;
	struct msm_pcie_dev_t *dev;
	void __iomem *config_base;
	bool rc = false;
	u32 rc_idx;
	int rv = 0;
	u32 bdf = BDF_OFFSET(bus->number, devfn);
	int i;

	dev = PCIE_BUS_PRIV_DATA(bus);

	if (!dev) {
		pr_err("PCIe: No device found for this bus.\n");
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto out;
	}

	rc_idx = dev->rc_idx;
	rc = (bus->number == 0);	/* bus 0 is the root complex itself */

	spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);

	/* config access may be gated off, e.g. during suspend/recovery */
	if (!dev->cfg_access) {
		PCIE_DBG3(dev,
			"Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* the RC exposes only function 0 */
	if (rc && (devfn != 0)) {
		PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
			(oper == RD) ? "rd" : "wr", bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG3(dev,
			"Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* check if the link is up for endpoint */
	if (!rc && !msm_pcie_is_link_up(dev)) {
		PCIE_ERR(dev,
			"PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
			rc_idx, (oper == RD) ? "rd" : "wr",
			bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* before enumeration, retarget iATU region 0 at this function */
	if (!rc && !dev->enumerated)
		msm_pcie_cfg_bdf(dev, bus->number, devfn);

	/* split the access into aligned word + byte lane mask */
	word_offset = where & ~0x3;
	byte_offset = where & 0x3;
	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);

	if (rc || !dev->enumerated) {
		config_base = rc ? dev->dm_core : dev->conf;
	} else {
		/* after enumeration, each EP has its own mapped window */
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (dev->pcidev_table[i].bdf == bdf) {
				config_base = dev->pcidev_table[i].conf_base;
				break;
			}
		}
		if (i == MAX_DEVICE_NUM) {
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
		}
	}

	rd_val = readl_relaxed(config_base + word_offset);

	if (oper == RD) {
		*val = ((rd_val & mask) >> (8 * byte_offset));
		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
			rc_idx, bus->number, devfn, where, size, *val, rd_val);
	} else {
		wr_val = (rd_val & ~mask) |
			((*val << (8 * byte_offset)) & mask);

		/*
		 * RC register 0x3c: force bits 16/17 on — presumably the
		 * Bridge Control parity/SERR enables; confirm against the
		 * register map.
		 */
		if ((bus->number == 0) && (where == 0x3c))
			wr_val = wr_val | (3 << 16);

		writel_relaxed(wr_val, config_base + word_offset);
		wmb(); /* ensure config data is written to hardware register */

		if (dev->shadow_en) {
			/* all-FFs readback means the link dropped; don't
			 * pollute the shadow with garbage
			 */
			if (rd_val == PCIE_LINK_DOWN &&
				(readl_relaxed(config_base) == PCIE_LINK_DOWN))
				PCIE_ERR(dev,
					"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
					rc_idx, bus->number, devfn,
					where, size);
			else
				msm_pcie_save_shadow(dev, word_offset, wr_val,
					bdf, rc);
		}

		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
			rc_idx, bus->number, devfn, where, size,
			wr_val, rd_val, *val);
	}

unlock:
	spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
out:
	return rv;
}
2537
2538static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
2539 int size, u32 *val)
2540{
2541 int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
2542
2543 if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
2544 *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
2545 PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
2546 }
2547
2548 return ret;
2549}
2550
/* pci_ops write callback: thin wrapper around msm_pcie_oper_conf() */
static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			    int where, int size, u32 val)
{
	return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
}
2556
/* config accessors handed to the PCI core for this host bridge */
static struct pci_ops msm_pcie_ops = {
	.read = msm_pcie_rd_conf,
	.write = msm_pcie_wr_conf,
};
2561
2562static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
2563{
2564 int rc = 0, i;
2565 struct msm_pcie_gpio_info_t *info;
2566
2567 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
2568
2569 for (i = 0; i < dev->gpio_n; i++) {
2570 info = &dev->gpio[i];
2571
2572 if (!info->num)
2573 continue;
2574
2575 rc = gpio_request(info->num, info->name);
2576 if (rc) {
2577 PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
2578 dev->rc_idx, info->name, rc);
2579 break;
2580 }
2581
2582 if (info->out)
2583 rc = gpio_direction_output(info->num, info->init);
2584 else
2585 rc = gpio_direction_input(info->num);
2586 if (rc) {
2587 PCIE_ERR(dev,
2588 "PCIe: RC%d can't set direction for GPIO %s:%d\n",
2589 dev->rc_idx, info->name, rc);
2590 gpio_free(info->num);
2591 break;
2592 }
2593 }
2594
2595 if (rc)
2596 while (i--)
2597 gpio_free(dev->gpio[i].num);
2598
2599 return rc;
2600}
2601
2602static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
2603{
2604 int i;
2605
2606 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
2607
2608 for (i = 0; i < dev->gpio_n; i++)
2609 gpio_free(dev->gpio[i].num);
2610}
2611
/*
 * Enable all regulators described in dev->vreg[] (voltage, load mode,
 * then enable). On any failure, already-enabled regulators are
 * disabled in reverse order, and the "vreg-cx" corner vote is dropped
 * to the OFF level. Returns 0 on success or the first error code.
 */
static int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct regulator *vreg;
	struct msm_pcie_vreg_info_t *info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		info = &dev->vreg[i];
		vreg = info->hdl;

		/* optional supply not present on this target */
		if (!vreg)
			continue;

		PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
			dev->rc_idx, info->name);
		if (info->max_v) {
			rc = regulator_set_voltage(vreg,
						info->min_v, info->max_v);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set voltage for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		if (info->opt_mode) {
			rc = regulator_set_load(vreg, info->opt_mode);
			if (rc < 0) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set mode for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		rc = regulator_enable(vreg);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe: RC%d can't enable regulator %s: %d\n",
				dev->rc_idx, info->name, rc);
			break;
		}
	}

	/* unwind regulators enabled before the failure */
	if (rc)
		while (i--) {
			struct regulator *hdl = dev->vreg[i].hdl;

			if (hdl) {
				regulator_disable(hdl);
				if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
					PCIE_DBG(dev,
						"RC%d: Removing %s vote.\n",
						dev->rc_idx,
						dev->vreg[i].name);
					regulator_set_voltage(hdl,
						RPMH_REGULATOR_LEVEL_OFF,
						RPMH_REGULATOR_LEVEL_MAX);
				}
			}

		}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
2682
/*
 * Disable all regulators enabled by msm_pcie_vreg_init(), in reverse
 * order, dropping the "vreg-cx" corner vote to the OFF level.
 */
static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
{
	int i;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
		if (dev->vreg[i].hdl) {
			PCIE_DBG(dev, "Vreg %s is being disabled\n",
				dev->vreg[i].name);
			regulator_disable(dev->vreg[i].hdl);

			if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
				PCIE_DBG(dev,
					"RC%d: Removing %s vote.\n",
					dev->rc_idx,
					dev->vreg[i].name);
				regulator_set_voltage(dev->vreg[i].hdl,
					RPMH_REGULATOR_LEVEL_OFF,
					RPMH_REGULATOR_LEVEL_MAX);
			}
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
2709
2710static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
2711{
2712 int i, rc = 0;
2713 struct msm_pcie_clk_info_t *info;
2714 struct msm_pcie_reset_info_t *reset_info;
2715
2716 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
2717
2718 rc = regulator_enable(dev->gdsc);
2719
2720 if (rc) {
2721 PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
2722 dev->rc_idx, dev->pdev->name);
2723 return rc;
2724 }
2725
2726 if (dev->gdsc_smmu) {
2727 rc = regulator_enable(dev->gdsc_smmu);
2728
2729 if (rc) {
2730 PCIE_ERR(dev,
2731 "PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
2732 dev->rc_idx, dev->pdev->name);
2733 return rc;
2734 }
2735 }
2736
2737 PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
2738 if (dev->bus_client) {
2739 rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
2740 if (rc) {
2741 PCIE_ERR(dev,
2742 "PCIe: fail to set bus bandwidth for RC%d:%d.\n",
2743 dev->rc_idx, rc);
2744 return rc;
2745 }
2746
2747 PCIE_DBG2(dev,
2748 "PCIe: set bus bandwidth for RC%d.\n",
2749 dev->rc_idx);
2750 }
2751
2752 for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
2753 info = &dev->clk[i];
2754
2755 if (!info->hdl)
2756 continue;
2757
2758 if (info->config_mem)
2759 msm_pcie_config_clock_mem(dev, info);
2760
2761 if (info->freq) {
2762 rc = clk_set_rate(info->hdl, info->freq);
2763 if (rc) {
2764 PCIE_ERR(dev,
2765 "PCIe: RC%d can't set rate for clk %s: %d.\n",
2766 dev->rc_idx, info->name, rc);
2767 break;
2768 }
2769
2770 PCIE_DBG2(dev,
2771 "PCIe: RC%d set rate for clk %s.\n",
2772 dev->rc_idx, info->name);
2773 }
2774
2775 rc = clk_prepare_enable(info->hdl);
2776
2777 if (rc)
2778 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
2779 dev->rc_idx, info->name);
2780 else
2781 PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
2782 info->name, dev->rc_idx);
2783 }
2784
2785 if (rc) {
2786 PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
2787 dev->rc_idx);
2788 while (i--) {
2789 struct clk *hdl = dev->clk[i].hdl;
2790
2791 if (hdl)
2792 clk_disable_unprepare(hdl);
2793 }
2794
2795 if (dev->gdsc_smmu)
2796 regulator_disable(dev->gdsc_smmu);
2797
2798 regulator_disable(dev->gdsc);
2799 }
2800
2801 for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
2802 reset_info = &dev->reset[i];
2803 if (reset_info->hdl) {
Tony Truongc21087d2017-05-01 17:58:06 -07002804 rc = reset_control_assert(reset_info->hdl);
2805 if (rc)
2806 PCIE_ERR(dev,
2807 "PCIe: RC%d failed to assert reset for %s.\n",
2808 dev->rc_idx, reset_info->name);
2809 else
2810 PCIE_DBG2(dev,
2811 "PCIe: RC%d successfully asserted reset for %s.\n",
2812 dev->rc_idx, reset_info->name);
2813
2814 /* add a 1ms delay to ensure the reset is asserted */
2815 usleep_range(1000, 1005);
2816
Tony Truong349ee492014-10-01 17:35:56 -07002817 rc = reset_control_deassert(reset_info->hdl);
2818 if (rc)
2819 PCIE_ERR(dev,
2820 "PCIe: RC%d failed to deassert reset for %s.\n",
2821 dev->rc_idx, reset_info->name);
2822 else
2823 PCIE_DBG2(dev,
2824 "PCIe: RC%d successfully deasserted reset for %s.\n",
2825 dev->rc_idx, reset_info->name);
2826 }
2827 }
2828
2829 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
2830
2831 return rc;
2832}
2833
/*
 * Undo msm_pcie_clk_init(): disable every core clock, drop the bus
 * bandwidth vote, and release the SMMU and core GDSC votes.
 */
static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
{
	int i;
	int rc;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
		if (dev->clk[i].hdl)
			clk_disable_unprepare(dev->clk[i].hdl);

	if (dev->bus_client) {
		PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
			dev->rc_idx);

		rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
		if (rc)
			PCIE_ERR(dev,
				"PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
				dev->rc_idx, rc);
		else
			PCIE_DBG(dev,
				"PCIe: relinquish bus bandwidth for RC%d.\n",
				dev->rc_idx);
	}

	if (dev->gdsc_smmu)
		regulator_disable(dev->gdsc_smmu);

	regulator_disable(dev->gdsc);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
2867
2868static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
2869{
2870 int i, rc = 0;
2871 struct msm_pcie_clk_info_t *info;
2872 struct msm_pcie_reset_info_t *pipe_reset_info;
2873
2874 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
2875
2876 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
2877 info = &dev->pipeclk[i];
2878
2879 if (!info->hdl)
2880 continue;
2881
2882
2883 if (info->config_mem)
2884 msm_pcie_config_clock_mem(dev, info);
2885
2886 if (info->freq) {
2887 rc = clk_set_rate(info->hdl, info->freq);
2888 if (rc) {
2889 PCIE_ERR(dev,
2890 "PCIe: RC%d can't set rate for clk %s: %d.\n",
2891 dev->rc_idx, info->name, rc);
2892 break;
2893 }
2894
2895 PCIE_DBG2(dev,
2896 "PCIe: RC%d set rate for clk %s: %d.\n",
2897 dev->rc_idx, info->name, rc);
2898 }
2899
2900 rc = clk_prepare_enable(info->hdl);
2901
2902 if (rc)
2903 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
2904 dev->rc_idx, info->name);
2905 else
2906 PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
2907 dev->rc_idx, info->name);
2908 }
2909
2910 if (rc) {
2911 PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
2912 dev->rc_idx);
2913 while (i--)
2914 if (dev->pipeclk[i].hdl)
2915 clk_disable_unprepare(dev->pipeclk[i].hdl);
2916 }
2917
2918 for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
2919 pipe_reset_info = &dev->pipe_reset[i];
2920 if (pipe_reset_info->hdl) {
Tony Truongc21087d2017-05-01 17:58:06 -07002921 rc = reset_control_assert(pipe_reset_info->hdl);
2922 if (rc)
2923 PCIE_ERR(dev,
2924 "PCIe: RC%d failed to assert pipe reset for %s.\n",
2925 dev->rc_idx, pipe_reset_info->name);
2926 else
2927 PCIE_DBG2(dev,
2928 "PCIe: RC%d successfully asserted pipe reset for %s.\n",
2929 dev->rc_idx, pipe_reset_info->name);
2930
2931 /* add a 1ms delay to ensure the reset is asserted */
2932 usleep_range(1000, 1005);
2933
Tony Truong349ee492014-10-01 17:35:56 -07002934 rc = reset_control_deassert(
2935 pipe_reset_info->hdl);
2936 if (rc)
2937 PCIE_ERR(dev,
2938 "PCIe: RC%d failed to deassert pipe reset for %s.\n",
2939 dev->rc_idx, pipe_reset_info->name);
2940 else
2941 PCIE_DBG2(dev,
2942 "PCIe: RC%d successfully deasserted pipe reset for %s.\n",
2943 dev->rc_idx, pipe_reset_info->name);
2944 }
2945 }
2946
2947 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
2948
2949 return rc;
2950}
2951
2952static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
2953{
2954 int i;
2955
2956 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
2957
2958 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
2959 if (dev->pipeclk[i].hdl)
2960 clk_disable_unprepare(
2961 dev->pipeclk[i].hdl);
2962
2963 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
2964}
2965
2966static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
2967{
2968 int i;
2969 u8 type;
2970 struct msm_pcie_device_info *dev_table = dev->pcidev_table;
2971
2972 for (i = 0; i < MAX_DEVICE_NUM; i++) {
2973 if (!dev_table[i].bdf)
2974 break;
2975
2976 type = dev_table[i].bdf >> 24 == 0x1 ?
2977 PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
2978
2979 msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
2980 dev_table[i].phy_address + SZ_4K - 1,
2981 dev_table[i].bdf);
2982 }
2983}
2984
/*
 * One-time controller configuration after the link is brought up:
 * config-space iATU, N_FTS, AUX clock frequency, completion timeout,
 * and AER enablement on the RC.
 */
static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
	 * axi config address range to device config address range
	 */
	if (dev->enumerated) {
		msm_pcie_iatu_config_all_ep(dev);
	} else {
		dev->current_bdf = 0; /* to force IATU re-config */
		msm_pcie_cfg_bdf(dev, 1, 0);
	}

	/* configure N_FTS */
	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
	if (!dev->n_fts)
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
					0, BIT(15));
	else
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
					PCIE20_ACK_N_FTS,
					dev->n_fts << 8);

	/* keep the shadow in sync for restore after power collapse */
	if (dev->shadow_en)
		dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
			readl_relaxed(dev->dm_core +
				PCIE20_ACK_F_ASPM_CTRL_REG);

	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));

	/* configure AUX clock frequency register for PCIe core */
	if (dev->use_19p2mhz_aux_clk)
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
	else
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);

	/* configure the completion timeout value for PCIe core */
	if (dev->cpl_timeout && dev->bridge_found)
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0xf, dev->cpl_timeout);

	/* Enable AER on RC */
	if (dev->aer_enable) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
					BIT(16)|BIT(17));
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
					BIT(3)|BIT(2)|BIT(1)|BIT(0));

		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
	}
}
3043
/*
 * Configure link power management on the RC and the first endpoint:
 * common clock, clock power management, L0s, L1 and L1 sub-states,
 * gated by the corresponding dev->*_supported/_en flags.  Every register
 * that is modified is mirrored into the RC/EP shadow arrays when
 * shadowing is enabled.  Bails out early if the endpoint lacks the PCIe
 * capability (or, for L1SS, the L1 sub-states extended capability).
 */
static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
{
	u32 val;
	u32 current_offset;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_cap_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	/* Enable the AUX Clock and the Core Clk to be synchronous for L1SS*/
	if (!dev->aux_clk_sync && dev->l1ss_supported)
		msm_pcie_write_mask(dev->parf +
				PCIE20_PARF_SYS_CTRL, BIT(3), 0);

	/* walk the EP's standard capability list for the PCIe capability */
	current_offset = readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

	while (current_offset) {
		if (msm_pcie_check_align(dev, current_offset))
			return;

		val = readl_relaxed(dev->conf + current_offset);
		if ((val & 0xff) == PCIE20_CAP_ID) {
			/* fixed register offsets within the PCIe capability */
			ep_link_cap_offset = current_offset + 0x0c;
			ep_link_ctrlstts_offset = current_offset + 0x10;
			ep_dev_ctrl2stts2_offset = current_offset + 0x28;
			break;
		}
		current_offset = (val >> 8) & 0xff;
	}

	if (!ep_link_cap_offset) {
		PCIE_DBG(dev,
			"RC%d endpoint does not support PCIe capability registers\n",
			dev->rc_idx);
		return;
	}

	PCIE_DBG(dev,
		"RC%d: ep_link_cap_offset: 0x%x\n",
		dev->rc_idx, ep_link_cap_offset);

	/* common clock config: set bit 6 in both link control registers */
	if (dev->common_clk_en) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(6));

		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(6));

		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);

			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}

		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* clock PM: enable only if the EP advertises it (link cap bit 18) */
	if (dev->clk_power_manage_en) {
		val = readl_relaxed(dev->conf + ep_link_cap_offset);
		if (val & BIT(18)) {
			msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
						0, BIT(8));

			if (dev->shadow_en)
				dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
					readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);

			PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset));
		}
	}

	/* enable L0s on both ends (link control bit 0) */
	if (dev->l0s_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(0));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* enable L1 on both ends (link control bit 1) */
	if (dev->l1_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(1));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* L1 sub-states: locate the L1SS extended capability on the EP */
	if (dev->l1ss_supported) {
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			if (msm_pcie_check_align(dev, current_offset))
				return;

			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset = current_offset + 0x4;
				ep_l1sub_ctrl1_offset = current_offset + 0x8;
				break;
			}
			/* bits 31:20 of the header hold the next cap offset */
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG(dev,
				"RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			return;
		}

		val = readl_relaxed(dev->conf + ep_l1sub_cap_reg1_offset);

		PCIE_DBG2(dev, "EP's L1SUB_CAPABILITY_REG_1: 0x%x\n", val);
		PCIE_DBG2(dev, "RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
				dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* keep only the low 4 supported-sub-state bits */
		val &= 0xf;

		/*
		 * enable the advertised sub-states on both ends and set
		 * bit 10 of DEVICE_CONTROL2 on RC and EP
		 * (presumably LTR enable -- confirm against PCIe spec)
		 */
		msm_pcie_write_reg_field(dev->dm_core, PCIE20_L1SUB_CONTROL1,
					0xf, val);
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0, BIT(10));
		msm_pcie_write_reg_field(dev->conf, ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf + ep_dev_ctrl2stts2_offset,
					0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG2(dev, "RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1));
		PCIE_DBG2(dev, "RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
			PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG2(dev, "EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf + ep_l1sub_ctrl1_offset));
		PCIE_DBG2(dev, "EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
			ep_dev_ctrl2stts2_offset));
	}
}
3231
Stephen Boydb5b8fc32017-06-21 08:59:11 -07003232static void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
Tony Truong349ee492014-10-01 17:35:56 -07003233{
3234 int i;
3235
3236 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3237
3238 /* program MSI controller and enable all interrupts */
3239 writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
3240 writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
3241
3242 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
3243 writel_relaxed(~0, dev->dm_core +
3244 PCIE20_MSI_CTRL_INTR_EN + (i * 12));
3245
3246 /* ensure that hardware is configured before proceeding */
3247 wmb();
3248}
3249
/*
 * Parse and acquire every resource this RC needs from DT/platform data:
 * regulators (incl. GDSCs), GPIOs, PHY init sequences, clocks, resets,
 * the bus-scaling client, register regions and IRQ/MSI numbers.
 *
 * Returns 0 on success or a negative errno on failure; acquired
 * resources are devm-managed, so partial acquisitions are released with
 * the device.
 */
static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
					struct platform_device *pdev)
{
	int i, len, cnt, ret = 0, size = 0;
	struct msm_pcie_vreg_info_t *vreg_info;
	struct msm_pcie_gpio_info_t *gpio_info;
	struct msm_pcie_clk_info_t *clk_info;
	struct resource *res;
	struct msm_pcie_res_info_t *res_info;
	struct msm_pcie_irq_info_t *irq_info;
	struct msm_pcie_irq_info_t *msi_info;
	struct msm_pcie_reset_info_t *reset_info;
	struct msm_pcie_reset_info_t *pipe_reset_info;
	char prop_name[MAX_PROP_SIZE];
	const __be32 *prop;
	u32 *clkfreq = NULL;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	/* per-clock max frequencies, indexed like the DT "clock-names" */
	cnt = of_property_count_strings((&pdev->dev)->of_node,
			"clock-names");
	if (cnt > 0) {
		/*
		 * NOTE(review): buffer holds MSM_PCIE_MAX_CLK +
		 * MSM_PCIE_MAX_PIPE_CLK entries; assumes the DT
		 * "clock-names" count never exceeds that -- confirm.
		 */
		clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
					sizeof(*clkfreq), GFP_KERNEL);
		if (!clkfreq) {
			PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
					dev->rc_idx);
			return -ENOMEM;
		}
		ret = of_property_read_u32_array(
			(&pdev->dev)->of_node,
			"max-clock-frequency-hz", clkfreq, cnt);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
				dev->rc_idx, ret);
			goto out;
		}
	}

	/* regulators: required ones fail the probe, optional ones are skipped */
	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		vreg_info = &dev->vreg[i];
		vreg_info->hdl =
				devm_regulator_get(&pdev->dev, vreg_info->name);

		if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
			PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
				vreg_info->name);
			ret = PTR_ERR(vreg_info->hdl);
			goto out;
		}

		if (IS_ERR(vreg_info->hdl)) {
			if (vreg_info->required) {
				PCIE_DBG(dev, "Vreg %s doesn't exist\n",
					vreg_info->name);
				ret = PTR_ERR(vreg_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Optional Vreg %s doesn't exist\n",
					vreg_info->name);
				vreg_info->hdl = NULL;
			}
		} else {
			dev->vreg_n++;
			snprintf(prop_name, MAX_PROP_SIZE,
				"qcom,%s-voltage-level", vreg_info->name);
			prop = of_get_property((&pdev->dev)->of_node,
						prop_name, &len);
			if (!prop || (len != (3 * sizeof(__be32)))) {
				PCIE_DBG(dev, "%s %s property\n",
					prop ? "invalid format" :
					"no", prop_name);
			} else {
				/* DT triple: <max_v min_v opt_mode> */
				vreg_info->max_v = be32_to_cpup(&prop[0]);
				vreg_info->min_v = be32_to_cpup(&prop[1]);
				vreg_info->opt_mode =
					be32_to_cpup(&prop[2]);
			}
		}
	}

	/* core GDSC is mandatory */
	dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");

	if (IS_ERR(dev->gdsc)) {
		PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
			dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
		if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
			PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
					dev->pdev->name);
		ret = PTR_ERR(dev->gdsc);
		goto out;
	}

	/* SMMU GDSC is optional */
	dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");

	if (IS_ERR(dev->gdsc_smmu)) {
		PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
			dev->rc_idx);
		dev->gdsc_smmu = NULL;
	}

	dev->gpio_n = 0;
	for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
		gpio_info = &dev->gpio[i];
		ret = of_get_named_gpio((&pdev->dev)->of_node,
					gpio_info->name, 0);
		if (ret >= 0) {
			gpio_info->num = ret;
			dev->gpio_n++;
			PCIE_DBG(dev, "GPIO num for %s is %d\n",
				gpio_info->name, gpio_info->num);
		} else {
			if (gpio_info->required) {
				/* ret still carries the negative errno here */
				PCIE_ERR(dev,
					"Could not get required GPIO %s\n",
					gpio_info->name);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Could not get optional GPIO %s\n",
					gpio_info->name);
			}
		}
		ret = 0;
	}

	/* optional common-PHY init sequence from DT */
	of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
	if (size) {
		dev->phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->phy_sequence) {
			dev->phy_len =
				size / sizeof(*dev->phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,phy-sequence",
				(unsigned int *)dev->phy_sequence,
				size / sizeof(dev->phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* optional per-port PHY init sequence from DT */
	of_get_property(pdev->dev.of_node, "qcom,port-phy-sequence", &size);
	if (size) {
		dev->port_phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->port_phy_sequence) {
			dev->port_phy_len =
				size / sizeof(*dev->port_phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,port-phy-sequence",
				(unsigned int *)dev->port_phy_sequence,
				size / sizeof(dev->port_phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for port phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: port phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* core clocks: frequencies follow the pipe clocks in clkfreq[] */
	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
		clk_info = &dev->clk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i +
					MSM_PCIE_MAX_PIPE_CLK];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* pipe clocks occupy the first MSM_PCIE_MAX_PIPE_CLK clkfreq slots */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		clk_info = &dev->pipeclk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* reset lines (optional unless marked required) */
	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
		reset_info = &dev->reset[i];

		reset_info->hdl = devm_reset_control_get(&pdev->dev,
						reset_info->name);

		if (IS_ERR(reset_info->hdl)) {
			if (reset_info->required) {
				PCIE_DBG(dev,
					"Reset %s isn't available:%ld\n",
					reset_info->name,
					PTR_ERR(reset_info->hdl));

				ret = PTR_ERR(reset_info->hdl);
				reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Reset %s\n",
					reset_info->name);
				reset_info->hdl = NULL;
			}
		}
	}

	/* pipe reset lines */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];

		pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
						pipe_reset_info->name);

		if (IS_ERR(pipe_reset_info->hdl)) {
			if (pipe_reset_info->required) {
				PCIE_DBG(dev,
					"Pipe Reset %s isn't available:%ld\n",
					pipe_reset_info->name,
					PTR_ERR(pipe_reset_info->hdl));

				ret = PTR_ERR(pipe_reset_info->hdl);
				pipe_reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
					pipe_reset_info->name);
				pipe_reset_info->hdl = NULL;
			}
		}
	}

	/* bus bandwidth voting client (optional) */
	dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (!dev->bus_scale_table) {
		PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
			dev->rc_idx, dev->pdev->name);
		dev->bus_client = 0;
	} else {
		dev->bus_client =
			msm_bus_scale_register_client(dev->bus_scale_table);
		if (!dev->bus_client) {
			PCIE_ERR(dev,
				"PCIe: Failed to register bus client for RC%d (%s)\n",
				dev->rc_idx, dev->pdev->name);
			msm_bus_cl_clear_pdata(dev->bus_scale_table);
			ret = -ENODEV;
			goto out;
		}
	}

	/* map the named register regions */
	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
		res_info = &dev->res[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							res_info->name);

		if (!res) {
			PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
				dev->rc_idx, res_info->name);
		} else {
			PCIE_DBG(dev, "start addr for %s is %pa.\n",
				res_info->name, &res->start);

			res_info->base = devm_ioremap(&pdev->dev,
						res->start, resource_size(res));
			if (!res_info->base) {
				PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
					dev->rc_idx, res_info->name);
				ret = -ENOMEM;
				goto out;
			} else {
				res_info->resource = res;
			}
		}
	}

	/* look up named interrupt numbers (missing ones are tolerated) */
	for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
		irq_info = &dev->irq[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							irq_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, irq_info->name);
		} else {
			irq_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
					irq_info->num);
		}
	}

	/* look up named MSI interrupt numbers */
	for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
		msi_info = &dev->msi[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							msi_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, msi_info->name);
		} else {
			msi_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
					msi_info->num);
		}
	}

	/* All allocations succeeded */

	if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
		dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
	else
		dev->wake_n = 0;

	/* convenience aliases into the mapped regions */
	dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
	dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
	dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
	dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
	dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
	dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
	dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
	dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
	dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
	dev->dev_io_res->flags = IORESOURCE_IO;

out:
	kfree(clkfreq);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
3628
3629static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
3630{
3631 dev->parf = NULL;
3632 dev->elbi = NULL;
3633 dev->dm_core = NULL;
3634 dev->conf = NULL;
3635 dev->bars = NULL;
3636 dev->tcsr = NULL;
3637 dev->dev_mem_res = NULL;
3638 dev->dev_io_res = NULL;
3639}
3640
/*
 * Power up and train the link for one RC: assert PERST, enable
 * regulators/clocks per @options (PM_VREG/PM_CLK/PM_PIPE_CLK), program
 * the PARF block, initialize the PHY, release PERST, start LTSSM
 * training, then configure the controller, MSI and link power
 * management state.
 *
 * Returns 0 on success (or if the link is already enabled); a negative
 * value on failure, after unwinding whatever was enabled here.
 */
static int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
{
	int ret = 0;
	uint32_t val;
	long int retries = 0;
	int link_check_count = 0;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
		PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
			dev->rc_idx);
		goto out;
	}

	/* assert PCIe reset link to keep EP in reset */

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
				PERST_PROPAGATION_DELAY_US_MAX);

	/* enable power */

	if (options & PM_VREG) {
		ret = msm_pcie_vreg_init(dev);
		if (ret)
			goto out;
	}

	/* enable clocks */
	if (options & PM_CLK) {
		ret = msm_pcie_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto clk_fail;
	}

	/* re-apply secure configuration after the core lost power */
	if (dev->scm_dev_id) {
		PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
		msm_pcie_restore_sec_config(dev);
	}

	/* configure PCIe to RC mode */
	msm_pcie_write_reg(dev->parf, PCIE20_PARF_DEVICE_TYPE, 0x4);

	/* enable l1 mode, clear bit 5 (REQ_NOT_ENTR_L1) */
	if (dev->l1_supported)
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);

	/* enable PCIe clocks and resets */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);

	/* change DBI base address */
	writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
				0, BIT(4));

	/* enable selected IRQ */
	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
		msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);

		msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
					BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
					BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
					BIT(MSM_PCIE_INT_EVT_AER_ERR) |
					BIT(MSM_PCIE_INT_EVT_MSI_0) |
					BIT(MSM_PCIE_INT_EVT_MSI_1) |
					BIT(MSM_PCIE_INT_EVT_MSI_2) |
					BIT(MSM_PCIE_INT_EVT_MSI_3) |
					BIT(MSM_PCIE_INT_EVT_MSI_4) |
					BIT(MSM_PCIE_INT_EVT_MSI_5) |
					BIT(MSM_PCIE_INT_EVT_MSI_6) |
					BIT(MSM_PCIE_INT_EVT_MSI_7));

		PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
	}

	/* size the slave address space to cover the memory (BAR) region */
	if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_16M)
		writel_relaxed(SZ_32M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_8M)
		writel_relaxed(SZ_16M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else
		writel_relaxed(SZ_8M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);

	/* halt AXI master writes at the configured boundary for MSI */
	if (dev->use_msi) {
		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
		val = dev->wr_halt_size ? dev->wr_halt_size :
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
			BIT(31) | val);

		PCIE_DBG(dev,
			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
			dev->rc_idx,
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
	}

	/* the common PHY is initialized once, by the first RC to come up */
	mutex_lock(&com_phy_lock);
	/* init PCIe PHY */
	if (!num_rc_on)
		pcie_phy_init(dev);

	num_rc_on++;
	mutex_unlock(&com_phy_lock);

	if (options & PM_PIPE_CLK) {
		usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
					PHY_STABILIZATION_DELAY_US_MAX);
		/* Enable the pipe clock */
		ret = msm_pcie_pipe_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto link_fail;
	}

	PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);

	/* poll the PHY until ready, up to PHY_READY_TIMEOUT_COUNT tries */
	do {
		if (pcie_phy_is_ready(dev))
			break;
		retries++;
		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
					REFCLK_STABILIZATION_DELAY_US_MAX);
	} while (retries < PHY_READY_TIMEOUT_COUNT);

	PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
		dev->rc_idx, retries);

	if (pcie_phy_is_ready(dev))
		PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
	else {
		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
			dev->rc_idx);
		ret = -ENODEV;
		pcie_phy_dump(dev);
		goto link_fail;
	}

	pcie_pcs_port_phy_init(dev);

	/* optional endpoint settle time, in milliseconds */
	if (dev->ep_latency)
		usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				dev->gpio[MSM_PCIE_GPIO_EP].on);

	/* de-assert PCIe reset link to bring EP out of reset */

	PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);

	/* set max tlp read size */
	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
				0x7000, dev->tlp_rd_size);

	/* enable link training */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));

	PCIE_DBG(dev, "%s", "check if link is up\n");

	/* Wait for up to 100ms for the link to come up */
	do {
		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
		val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
		PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
			dev->rc_idx, (val >> 12) & 0x3f);
	} while ((!(val & XMLH_LINK_UP) ||
		!msm_pcie_confirm_linkup(dev, false, false, NULL))
		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));

	if ((val & XMLH_LINK_UP) &&
		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
		PCIE_DBG(dev, "Link is up after %d checkings\n",
			link_check_count);
		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
	} else {
		/* training failed: re-assert PERST and unwind */
		PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
			dev->rc_idx);
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
			dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
			dev->rc_idx);
		ret = -1;
		goto link_fail;
	}

	msm_pcie_config_controller(dev);

	/* use the core's built-in MSI controller unless GIC MSIs are used */
	if (!dev->msi_gicm_addr)
		msm_pcie_config_msi_controller(dev);

	msm_pcie_config_link_state(dev);

	/* on resume after enumeration, restore per-device config */
	if (dev->enumerated)
		pci_walk_bus(dev->dev->bus, &msm_pcie_config_device, dev);

	dev->link_status = MSM_PCIE_LINK_ENABLED;
	dev->power_on = true;
	dev->suspending = false;
	dev->link_turned_on_counter++;

	goto out;

link_fail:
	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	/* last RC going down also powers off the shared PHY */
	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	msm_pcie_pipe_clk_deinit(dev);
	msm_pcie_clk_deinit(dev);
clk_fail:
	msm_pcie_vreg_deinit(dev);
out:
	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
3897
/*
 * Power down the link for one RC: assert PERST, reset/power down the
 * port PHY (and the common PHY when the last RC goes down), then release
 * clocks, regulators and pipe clocks as selected by @options.
 * A no-op if the link is already powered down.
 */
static void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
{
	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (!dev->power_on) {
		PCIE_DBG(dev,
			"PCIe: the link of RC%d is already power down.\n",
			dev->rc_idx);
		mutex_unlock(&dev->setup_lock);
		return;
	}

	dev->link_status = MSM_PCIE_LINK_DISABLED;
	dev->power_on = false;
	dev->link_turned_off_counter++;

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);

	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);

	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	/* last RC going down also powers off the shared PHY */
	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	if (options & PM_CLK) {
		/* gate the PHY before turning its clocks off */
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
					BIT(0));
		msm_pcie_clk_deinit(dev);
	}

	if (options & PM_VREG)
		msm_pcie_vreg_deinit(dev);

	if (options & PM_PIPE_CLK)
		msm_pcie_pipe_clk_deinit(dev);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
3957
3958static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
3959 struct msm_pcie_device_info *ep_dev_info)
3960{
3961 u32 val;
3962 void __iomem *ep_base = ep_dev_info->conf_base;
3963 u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
3964 0xff;
3965
3966 while (current_offset) {
3967 if (msm_pcie_check_align(dev, current_offset))
3968 return;
3969
3970 val = readl_relaxed(ep_base + current_offset);
3971 if ((val & 0xff) == PCIE20_CAP_ID) {
3972 ep_dev_info->dev_ctrlstts_offset =
3973 current_offset + 0x8;
3974 break;
3975 }
3976 current_offset = (val >> 8) & 0xff;
3977 }
3978
3979 if (!ep_dev_info->dev_ctrlstts_offset) {
3980 PCIE_DBG(dev,
3981 "RC%d endpoint does not support PCIe cap registers\n",
3982 dev->rc_idx);
3983 return;
3984 }
3985
3986 PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
3987 dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
3988
3989 /* Enable AER on EP */
3990 msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
3991 BIT(3)|BIT(2)|BIT(1)|BIT(0));
3992
3993 PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
3994 readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
3995}
3996
/*
 * pci_walk_bus() callback: register @dev (a pci_dev) in both the global
 * and the per-RC device tables, program a 4K iATU config window for its
 * BDF, enable memory/bus-master on bridges, count endpoints for later
 * registration and enable AER on each endpoint.
 *
 * Returns 0 on success, -ENODEV for a NULL device, or a table index
 * value when either table is full.
 */
static int msm_pcie_config_device_table(struct device *dev, void *pdev)
{
	struct pci_dev *pcidev = to_pci_dev(dev);
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
	int ret = 0;
	u32 rc_idx = pcie_dev->rc_idx;
	u32 i, index;
	u32 bdf = 0;
	u8 type;
	u32 h_type;
	u32 bme;

	if (!pcidev) {
		PCIE_ERR(pcie_dev,
			"PCIe: Did not find PCI device in list for RC%d.\n",
			pcie_dev->rc_idx);
		return -ENODEV;
	}

	PCIE_DBG(pcie_dev,
		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
		pcidev->vendor, pcidev->device);

	/* the root bus (bus 0) itself needs no table entry */
	if (!pcidev->bus->number)
		return ret;

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
	/* bus 1 takes type-0 config cycles; buses behind it take type 1 */
	type = pcidev->bus->number == 1 ?
		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	/* find the matching unclaimed slot in the global table */
	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
		if (msm_pcie_dev_tbl[i].bdf == bdf &&
			!msm_pcie_dev_tbl[i].dev) {
			/* pair it with the per-RC entry of the same BDF */
			for (index = 0; index < MAX_DEVICE_NUM; index++) {
				if (dev_table_t[index].bdf == bdf) {
					msm_pcie_dev_tbl[i].dev = pcidev;
					msm_pcie_dev_tbl[i].domain = rc_idx;
					msm_pcie_dev_tbl[i].conf_base =
						pcie_dev->conf + index * SZ_4K;
					msm_pcie_dev_tbl[i].phy_address =
						axi_conf->start + index * SZ_4K;

					dev_table_t[index].dev = pcidev;
					dev_table_t[index].domain = rc_idx;
					dev_table_t[index].conf_base =
						pcie_dev->conf + index * SZ_4K;
					dev_table_t[index].phy_address =
						axi_conf->start + index * SZ_4K;

					/* map a 4K config window for this BDF */
					msm_pcie_iatu_config(pcie_dev, index,
						type,
						dev_table_t[index].phy_address,
						dev_table_t[index].phy_address
						+ SZ_4K - 1,
						bdf);

					h_type = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_HEADER_TYPE);

					bme = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_COMMAND_STATUS);

					/*
					 * bridges (header type 1) get
					 * memory-space/bus-master enabled;
					 * endpoints are counted for later
					 * client registration
					 */
					if (h_type & (1 << 16)) {
						pci_write_config_dword(pcidev,
							PCIE20_COMMAND_STATUS,
							bme | 0x06);
					} else {
						pcie_dev->num_ep++;
						dev_table_t[index].registered =
							false;
					}

					if (pcie_dev->num_ep > 1)
						pcie_dev->pending_ep_reg = true;

					msm_pcie_config_ep_aer(pcie_dev,
						&dev_table_t[index]);

					break;
				}
			}
			if (index == MAX_DEVICE_NUM) {
				PCIE_ERR(pcie_dev,
					"RC%d PCI device table is full.\n",
					rc_idx);
				ret = index;
			} else {
				break;
			}
		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
			pcidev == msm_pcie_dev_tbl[i].dev) {
			/* this device was already registered earlier */
			break;
		}
	}
	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"Global PCI device table is full: %d elements.\n",
			i);
		PCIE_ERR(pcie_dev,
			"Bus number is 0x%x\nDevice number is 0x%x\n",
			pcidev->bus->number, pcidev->devfn);
		ret = i;
	}
	return ret;
}
4106
/*
 * Program the PARF BDF-to-SID translation entry for an endpoint.  The
 * SMMU stream ID is taken from the device's iommu fwspec; its slot in
 * the translation table is the SID relative to the RC's smmu_sid_base.
 * Returns silently when the device carries no SID, or with an error log
 * when the computed offset exceeds the table.
 */
static void msm_pcie_configure_sid(struct msm_pcie_dev_t *pcie_dev,
				struct pci_dev *dev)
{
	u32 offset;
	u32 sid;
	u32 bdf;
	int ret;

	ret = iommu_fwspec_get_id(&dev->dev, &sid);
	if (ret) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: Device does not have a SID\n",
			pcie_dev->rc_idx);
		return;
	}

	PCIE_DBG(pcie_dev,
		"PCIe: RC%d: Device SID: 0x%x\n",
		pcie_dev->rc_idx, sid);

	bdf = BDF_OFFSET(dev->bus->number, dev->devfn);
	/* one 32-bit translate register (4 bytes) per SID */
	offset = (sid - pcie_dev->smmu_sid_base) * 4;

	if (offset >= MAX_SHORT_BDF_NUM * 4) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: Invalid SID offset: 0x%x. Should be less than 0x%x\n",
			pcie_dev->rc_idx, offset, MAX_SHORT_BDF_NUM * 4);
		return;
	}

	msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
	msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_SID_OFFSET, 0);
	/* BDF_OFFSET packs the BDF into the top 16 bits */
	msm_pcie_write_reg(pcie_dev->parf,
		PCIE20_PARF_BDF_TRANSLATE_N + offset, bdf >> 16);
}
Tony Truong349ee492014-10-01 17:35:56 -07004142
4143int msm_pcie_enumerate(u32 rc_idx)
4144{
4145 int ret = 0, bus_ret = 0, scan_ret = 0;
4146 struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
4147
4148 mutex_lock(&dev->enumerate_lock);
4149
4150 PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
4151
4152 if (!dev->drv_ready) {
4153 PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
4154 rc_idx);
4155 ret = -EPROBE_DEFER;
4156 goto out;
4157 }
4158
4159 if (!dev->enumerated) {
4160 ret = msm_pcie_enable(dev, PM_ALL);
4161
4162 /* kick start ARM PCI configuration framework */
4163 if (!ret) {
4164 struct pci_dev *pcidev = NULL;
4165 bool found = false;
4166 struct pci_bus *bus;
4167 resource_size_t iobase = 0;
4168 u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
4169 u32 vendor_id = ids & 0xffff;
4170 u32 device_id = (ids & 0xffff0000) >> 16;
4171 LIST_HEAD(res);
4172
4173 PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
4174 vendor_id, device_id);
4175
4176 ret = of_pci_get_host_bridge_resources(
4177 dev->pdev->dev.of_node,
4178 0, 0xff, &res, &iobase);
4179 if (ret) {
4180 PCIE_ERR(dev,
4181 "PCIe: failed to get host bridge resources for RC%d: %d\n",
4182 dev->rc_idx, ret);
4183 goto out;
4184 }
4185
4186 bus = pci_create_root_bus(&dev->pdev->dev, 0,
4187 &msm_pcie_ops,
4188 msm_pcie_setup_sys_data(dev),
4189 &res);
4190 if (!bus) {
4191 PCIE_ERR(dev,
4192 "PCIe: failed to create root bus for RC%d\n",
4193 dev->rc_idx);
4194 ret = -ENOMEM;
4195 goto out;
4196 }
4197
4198 scan_ret = pci_scan_child_bus(bus);
4199 PCIE_DBG(dev,
4200 "PCIe: RC%d: The max subordinate bus number discovered is %d\n",
4201 dev->rc_idx, ret);
4202
4203 msm_pcie_fixup_irqs(dev);
4204 pci_assign_unassigned_bus_resources(bus);
4205 pci_bus_add_devices(bus);
4206
4207 dev->enumerated = true;
4208
4209 msm_pcie_write_mask(dev->dm_core +
4210 PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
4211
4212 if (dev->cpl_timeout && dev->bridge_found)
4213 msm_pcie_write_reg_field(dev->dm_core,
4214 PCIE20_DEVICE_CONTROL2_STATUS2,
4215 0xf, dev->cpl_timeout);
4216
4217 if (dev->shadow_en) {
4218 u32 val = readl_relaxed(dev->dm_core +
4219 PCIE20_COMMAND_STATUS);
4220 PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
4221 val);
4222 dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
4223 }
4224
4225 do {
4226 pcidev = pci_get_device(vendor_id,
4227 device_id, pcidev);
4228 if (pcidev && (&msm_pcie_dev[rc_idx] ==
4229 (struct msm_pcie_dev_t *)
4230 PCIE_BUS_PRIV_DATA(pcidev->bus))) {
4231 msm_pcie_dev[rc_idx].dev = pcidev;
4232 found = true;
4233 PCIE_DBG(&msm_pcie_dev[rc_idx],
4234 "PCI device is found for RC%d\n",
4235 rc_idx);
4236 }
4237 } while (!found && pcidev);
4238
4239 if (!pcidev) {
4240 PCIE_ERR(dev,
4241 "PCIe: Did not find PCI device for RC%d.\n",
4242 dev->rc_idx);
4243 ret = -ENODEV;
4244 goto out;
4245 }
4246
4247 bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
4248 &msm_pcie_config_device_table);
4249
4250 if (bus_ret) {
4251 PCIE_ERR(dev,
4252 "PCIe: Failed to set up device table for RC%d\n",
4253 dev->rc_idx);
4254 ret = -ENODEV;
4255 goto out;
4256 }
4257 } else {
4258 PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
4259 dev->rc_idx);
4260 }
4261 } else {
4262 PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
4263 dev->rc_idx);
4264 }
4265
4266out:
4267 mutex_unlock(&dev->enumerate_lock);
4268
4269 return ret;
4270}
4271EXPORT_SYMBOL(msm_pcie_enumerate);
4272
4273static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
4274 enum msm_pcie_event event)
4275{
4276 if (dev->event_reg && dev->event_reg->callback &&
4277 (dev->event_reg->events & event)) {
4278 struct msm_pcie_notify *notify = &dev->event_reg->notify;
4279
4280 notify->event = event;
4281 notify->user = dev->event_reg->user;
4282 PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
4283 dev->rc_idx, event);
4284 dev->event_reg->callback(notify);
4285
4286 if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
4287 (event == MSM_PCIE_EVENT_LINKDOWN)) {
4288 dev->user_suspend = true;
4289 PCIE_DBG(dev,
4290 "PCIe: Client of RC%d will recover the link later.\n",
4291 dev->rc_idx);
4292 return;
4293 }
4294 } else {
4295 PCIE_DBG2(dev,
4296 "PCIe: Client of RC%d does not have registration for event %d\n",
4297 dev->rc_idx, event);
4298 }
4299}
4300
/*
 * Deferred work scheduled from handle_wake_irq() when WAKE# is asserted
 * before the RC has been enumerated: enumerate the RC, then deliver
 * MSM_PCIE_EVENT_LINKUP callbacks to the registered client(s).  Runs
 * under recovery_lock.
 */
static void handle_wake_func(struct work_struct *work)
{
	int i, ret;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
					handle_wake_work);

	PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);

	mutex_lock(&dev->recovery_lock);

	if (!dev->enumerated) {
		PCIE_DBG(dev,
			"PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
			dev->rc_idx);

		ret = msm_pcie_enumerate(dev->rc_idx);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: failed to enable RC%d upon wake request from the device.\n",
				dev->rc_idx);
			goto out;
		}

		if (dev->num_ep > 1) {
			/* multi-EP: walk every per-device registration */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg = dev->pcidev_table[i].event_reg;

				if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
					&& dev->event_reg &&
					dev->event_reg->callback &&
					(dev->event_reg->events &
					MSM_PCIE_EVENT_LINKUP)) {
					struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
					notify->event = MSM_PCIE_EVENT_LINKUP;
					notify->user = dev->event_reg->user;
					PCIE_DBG(dev,
						"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
						dev->rc_idx);
					dev->event_reg->callback(notify);
				}
			}
		} else {
			/* single EP: one registration on the RC itself */
			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
				dev->event_reg && dev->event_reg->callback &&
				(dev->event_reg->events &
				MSM_PCIE_EVENT_LINKUP)) {
				struct msm_pcie_notify *notify =
					&dev->event_reg->notify;
				notify->event = MSM_PCIE_EVENT_LINKUP;
				notify->user = dev->event_reg->user;
				PCIE_DBG(dev,
					"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
					dev->rc_idx);
				dev->event_reg->callback(notify);
			} else {
				PCIE_DBG(dev,
					"PCIe: Client of RC%d does not have registration for linkup event.\n",
					dev->rc_idx);
			}
		}
		goto out;
	} else {
		PCIE_ERR(dev,
			"PCIe: The enumeration for RC%d has already been done.\n",
			dev->rc_idx);
		goto out;
	}

out:
	mutex_unlock(&dev->recovery_lock);
}
4373
/*
 * AER interrupt handler (also invoked directly from handle_global_irq).
 * Reads and clears correctable/uncorrectable error status on the root
 * complex and, while the link is up, on the endpoint(s) identified by
 * PCIE20_AER_ERR_SRC_ID_REG.  Per-severity counters are updated and
 * correctable-error logging is throttled by corr_counter_limit.  Runs
 * under aer_lock.
 */
static irqreturn_t handle_aer_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
	int ep_corr_val = 0, ep_uncorr_val = 0;
	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
	u32 ep_dev_ctrlstts_offset = 0;
	int i, j, ep_src_bdf = 0;
	void __iomem *ep_base = NULL;
	unsigned long irqsave_flags;

	PCIE_DBG2(dev,
		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
		dev->rc_idx, irq, dev->rc_corr_counter,
		dev->rc_non_fatal_counter, dev->rc_fatal_counter,
		dev->ep_corr_counter, dev->ep_non_fatal_counter,
		dev->ep_fatal_counter);

	spin_lock_irqsave(&dev->aer_lock, irqsave_flags);

	/* errors reported during suspend are ignored */
	if (dev->suspending) {
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently suspending.\n",
			dev->rc_idx);
		spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
		return IRQ_HANDLED;
	}

	uncorr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_UNCORR_ERR_STATUS_REG);
	corr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_CORR_ERR_STATUS_REG);
	rc_err_status = readl_relaxed(dev->dm_core +
				PCIE20_AER_ROOT_ERR_STATUS_REG);
	rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
				PCIE20_CAP_DEVCTRLSTATUS);

	if (uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				uncorr_val);
	if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
		PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				corr_val);

	/* device status bits 16/17/18: correctable/non-fatal/fatal detected */
	if ((rc_dev_ctrlstts >> 18) & 0x1)
		dev->rc_fatal_counter++;
	if ((rc_dev_ctrlstts >> 17) & 0x1)
		dev->rc_non_fatal_counter++;
	if ((rc_dev_ctrlstts >> 16) & 0x1)
		dev->rc_corr_counter++;

	/* clear the RC's error-detected bits (write-1-to-clear) */
	msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(18)|BIT(17)|BIT(16));

	if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
		PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
		goto out;
	}

	/*
	 * ERR_SRC_ID_REG carries the correctable error source BDF in its
	 * low half (pass i == 0) and the uncorrectable one in its high
	 * half (pass i == 1); both are normalized into the table's
	 * upper-16-bit BDF format.
	 */
	for (i = 0; i < 2; i++) {
		if (i)
			ep_src_bdf = readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
		else
			ep_src_bdf = (readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;

		if (!ep_src_bdf)
			continue;

		for (j = 0; j < MAX_DEVICE_NUM; j++) {
			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
				PCIE_DBG2(dev,
					"PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
					i ? "Uncorrectable" : "Correctable",
					dev->pcidev_table[j].bdf >> 24,
					dev->pcidev_table[j].bdf >> 19 & 0x1f,
					dev->pcidev_table[j].bdf >> 16 & 0x07);
				ep_base = dev->pcidev_table[j].conf_base;
				ep_dev_ctrlstts_offset = dev->
					pcidev_table[j].dev_ctrlstts_offset;
				break;
			}
		}

		if (!ep_base) {
			PCIE_ERR(dev,
				"PCIe: RC%d no endpoint found for reported error\n",
				dev->rc_idx);
			goto out;
		}

		ep_uncorr_val = readl_relaxed(ep_base +
					PCIE20_AER_UNCORR_ERR_STATUS_REG);
		ep_corr_val = readl_relaxed(ep_base +
					PCIE20_AER_CORR_ERR_STATUS_REG);
		ep_dev_ctrlstts = readl_relaxed(ep_base +
					ep_dev_ctrlstts_offset);

		if (ep_uncorr_val)
			PCIE_DBG(dev,
				"EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				ep_uncorr_val);
		if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
			PCIE_DBG(dev,
				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				ep_corr_val);

		if ((ep_dev_ctrlstts >> 18) & 0x1)
			dev->ep_fatal_counter++;
		if ((ep_dev_ctrlstts >> 17) & 0x1)
			dev->ep_non_fatal_counter++;
		if ((ep_dev_ctrlstts >> 16) & 0x1)
			dev->ep_corr_counter++;

		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
					BIT(18)|BIT(17)|BIT(16));

		/* clear the endpoint's AER status bits */
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_UNCORR_ERR_STATUS_REG,
				0x3fff031, 0x3fff031);
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_CORR_ERR_STATUS_REG,
				0xf1c1, 0xf1c1);
	}
out:
	if (((dev->rc_corr_counter < corr_counter_limit) &&
		(dev->ep_corr_counter < corr_counter_limit)) ||
		uncorr_val || ep_uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
				rc_err_status);
	/* finally clear the RC's AER status bits */
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_UNCORR_ERR_STATUS_REG,
			0x3fff031, 0x3fff031);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_CORR_ERR_STATUS_REG,
			0xf1c1, 0xf1c1);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_ROOT_ERR_STATUS_REG,
			0x7f, 0x7f);

	spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
	return IRQ_HANDLED;
}
4519
/*
 * WAKE# interrupt handler.  If the RC is not yet enumerated (and wake
 * enumeration is not disabled via boot_option), defer enumeration to
 * handle_wake_work; otherwise pulse the wakeup source and send
 * MSM_PCIE_EVENT_WAKEUP to the registered client(s).  Runs under
 * wakeup_lock.
 */
static irqreturn_t handle_wake_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);

	dev->wake_counter++;
	PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
			dev->wake_counter, dev->rc_idx);

	PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
			dev->rc_idx);

	if (!dev->enumerated && !(dev->boot_option &
		MSM_PCIE_NO_WAKE_ENUMERATION)) {
		PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
		schedule_work(&dev->handle_wake_work);
	} else {
		PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
		/* brief stay-awake/relax pulse on the wakeup source */
		__pm_stay_awake(&dev->ws);
		__pm_relax(&dev->ws);

		if (dev->num_ep > 1) {
			/* multi-EP: notify each per-device registration */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_WAKEUP);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
		}
	}

	spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);

	return IRQ_HANDLED;
}
4560
/*
 * Linkdown interrupt handler: on a genuine linkdown (link was enabled
 * and the RC is not suspending) mark the link disabled, stop register
 * shadowing, optionally panic (linkdown_panic debug knob), assert PERST
 * and send MSM_PCIE_EVENT_LINKDOWN to the registered client(s).  Runs
 * under linkdown_lock.
 */
static irqreturn_t handle_linkdown_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);

	dev->linkdown_counter++;

	PCIE_DBG(dev,
		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
		dev->linkdown_counter, dev->rc_idx);

	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG(dev,
			"PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
			dev->rc_idx);
	} else if (dev->suspending) {
		PCIE_DBG(dev,
			"PCIe:the link of RC%d is suspending.\n",
			dev->rc_idx);
	} else {
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		/* shadow contents are stale once the link drops */
		dev->shadow_en = false;

		if (dev->linkdown_panic)
			panic("User has chosen to panic on linkdown\n");

		/* assert PERST */
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

		if (dev->num_ep > 1) {
			/* multi-EP: notify each per-device registration */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_LINKDOWN);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
		}
	}

	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);

	return IRQ_HANDLED;
}
4611
/*
 * Chained handler for the controller's physical MSI interrupt line:
 * walk the MSI control groups (status registers spaced 12 bytes apart),
 * ack each pending bit and dispatch the virq mapped for it in
 * dev->irq_domain.
 */
static irqreturn_t handle_msi_irq(int irq, void *data)
{
	int i, j;
	unsigned long val;
	struct msm_pcie_dev_t *dev = data;
	void __iomem *ctrl_status;

	PCIE_DUMP(dev, "irq: %d\n", irq);

	/*
	 * check for set bits, clear it by setting that bit
	 * and trigger corresponding irq
	 */
	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
		ctrl_status = dev->dm_core +
				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);

		val = readl_relaxed(ctrl_status);
		while (val) {
			j = find_first_bit(&val, 32);
			writel_relaxed(BIT(j), ctrl_status);
			/* ensure that interrupt is cleared (acked) */
			wmb();
			/* each group carries 32 vectors: hwirq = j + 32*i */
			generic_handle_irq(
			   irq_find_mapping(dev->irq_domain, (j + (32*i)))
			   );
			/* re-read in case more bits latched meanwhile */
			val = readl_relaxed(ctrl_status);
		}
	}

	return IRQ_HANDLED;
}
4644
/*
 * Top-level "global" interrupt handler: read INT_ALL_STATUS masked by
 * INT_ALL_MASK, ack everything via INT_ALL_CLEAR, then dispatch the
 * linkdown and AER sub-handlers for whichever status bits were set.
 * Runs under global_irq_lock.
 */
static irqreturn_t handle_global_irq(int irq, void *data)
{
	int i;
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	u32 status = 0;

	spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);

	/* only consider events that are currently unmasked */
	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);

	PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
		dev->rc_idx, irq, status);

	for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
		if (status & BIT(i)) {
			switch (i) {
			case MSM_PCIE_INT_EVT_LINK_DOWN:
				PCIE_DBG(dev,
					"PCIe: RC%d: handle linkdown event.\n",
					dev->rc_idx);
				handle_linkdown_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_LEGACY:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER legacy event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_AER_ERR:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER event.\n",
					dev->rc_idx);
				handle_aer_irq(irq, data);
				break;
			default:
				/* unhandled bits are only logged */
				PCIE_DUMP(dev,
					"PCIe: RC%d: Unexpected event %d is caught!\n",
					dev->rc_idx, i);
			}
		}
	}

	spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);

	return IRQ_HANDLED;
}
4695
Tony Truong52122a62017-03-23 18:00:34 -07004696static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
4697 struct pci_dev *pdev)
4698{
4699 struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
4700 int bypass_en = 0;
4701
4702 if (!domain) {
4703 PCIE_DBG(dev,
4704 "PCIe: RC%d: client does not have an iommu domain\n",
4705 dev->rc_idx);
4706 return;
4707 }
4708
4709 iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
4710 if (!bypass_en) {
4711 int ret;
4712 phys_addr_t pcie_base_addr =
4713 dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
4714 dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);
4715
4716 ret = iommu_unmap(domain, iova, PAGE_SIZE);
4717 if (ret != PAGE_SIZE)
4718 PCIE_ERR(dev,
4719 "PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
4720 dev->rc_idx, ret);
4721 }
4722}
4723
/*
 * Release one MSI vector previously handed out by this driver.
 * @irq:  virq being torn down.
 * @pdev: owning pci_dev, or NULL (per-vector teardown path), in which
 *        case it is recovered from the irq's chip_data.
 *
 * For QGIC-routed MSIs the irq must lie inside the block attached to
 * the device's msi_desc; when the last vector of that block is torn
 * down the QGIC doorbell IOMMU mapping is removed as well.  For
 * controller-based MSIs the slot is derived from the irq_domain
 * mapping.  In both cases the slot bit is cleared in msi_irq_in_use.
 */
static void msm_pcie_destroy_irq(unsigned int irq, struct pci_dev *pdev)
{
	int pos;
	struct msi_desc *entry = irq_get_msi_desc(irq);
	struct msi_desc *firstentry;
	struct msm_pcie_dev_t *dev;
	u32 nvec;
	int firstirq;

	/* fall back to chip_data set in arch_setup_msi_irq_default() */
	if (!pdev)
		pdev = irq_get_chip_data(irq);

	if (!pdev) {
		pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
		return;
	}

	dev = PCIE_BUS_PRIV_DATA(pdev->bus);
	if (!dev) {
		pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
		return;
	}

	if (!entry) {
		PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
			dev->rc_idx, irq);
		return;
	}

	firstentry = first_pci_msi_entry(pdev);
	if (!firstentry) {
		PCIE_ERR(dev,
			"PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
			dev->rc_idx, irq);
		return;
	}

	firstirq = firstentry->irq;
	nvec = (1 << entry->msi_attrib.multiple);

	if (dev->msi_gicm_addr) {
		PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);

		if (irq < firstirq || irq > firstirq + nvec - 1) {
			PCIE_ERR(dev,
				"Could not find irq: %d in RC%d MSI table\n",
				irq, dev->rc_idx);
			return;
		}
		/* last vector of the block: drop the doorbell mapping */
		if (irq == firstirq + nvec - 1)
			msm_pcie_unmap_qgic_addr(dev, pdev);
		pos = irq - firstirq;
	} else {
		PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
		pos = irq - irq_find_mapping(dev->irq_domain, 0);
	}

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
		pos, *dev->msi_irq_in_use);
	clear_bit(pos, dev->msi_irq_in_use);
	PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
		pos, *dev->msi_irq_in_use);
}
4789
/* hookup to linux pci msi framework */
/*
 * Per-vector teardown entry point called by the PCI MSI core.  No
 * pci_dev is available here, so msm_pcie_destroy_irq() recovers it
 * from the irq chip_data.
 */
void arch_teardown_msi_irq(unsigned int irq)
{
	PCIE_GEN_DBG("irq %d deallocated\n", irq);
	msm_pcie_destroy_irq(irq, NULL);
}
4796
4797void arch_teardown_msi_irqs(struct pci_dev *dev)
4798{
4799 struct msi_desc *entry;
4800 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
4801
4802 PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
4803 pcie_dev->rc_idx, dev->vendor, dev->device);
4804
4805 pcie_dev->use_msi = false;
4806
4807 list_for_each_entry(entry, &dev->dev.msi_list, list) {
4808 int i, nvec;
4809
4810 if (entry->irq == 0)
4811 continue;
4812 nvec = 1 << entry->msi_attrib.multiple;
4813 for (i = 0; i < nvec; i++)
Tony Truongb09d0e82017-06-02 13:37:36 -07004814 msm_pcie_destroy_irq(entry->irq + i, dev);
Tony Truong349ee492014-10-01 17:35:56 -07004815 }
4816}
4817
/*
 * No-op ->irq_ack for pcie_msi_chip; the controller's MSI status bit is
 * already cleared in handle_msi_irq() before dispatch.
 */
static void msm_pcie_msi_nop(struct irq_data *d)
{
}
4821
/*
 * irq_chip for controller-based (non-QGIC) MSI vectors: mask/unmask go
 * through the generic MSI helpers, ack is a no-op (status is cleared in
 * the chained handler).
 */
static struct irq_chip pcie_msi_chip = {
	.name = "msm-pcie-msi",
	.irq_ack = msm_pcie_msi_nop,
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
4830
4831static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
4832{
4833 int irq, pos;
4834
4835 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4836
4837again:
4838 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
4839
4840 if (pos >= PCIE_MSI_NR_IRQS)
4841 return -ENOSPC;
4842
4843 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
4844
4845 if (test_and_set_bit(pos, dev->msi_irq_in_use))
4846 goto again;
4847 else
4848 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
4849
4850 irq = irq_create_mapping(dev->irq_domain, pos);
4851 if (!irq)
4852 return -EINVAL;
4853
4854 return irq;
4855}
4856
/*
 * Allocate one controller-based MSI vector for @pdev/@desc: grab a free
 * slot, attach the msi_desc and the owning pci_dev (for teardown), then
 * program the endpoint's MSI message with the controller's capture
 * address (MSM_PCIE_MSI_PHY) and the slot index as payload.  @nvec is
 * unused on this path.  Returns 0 on success or a negative errno.
 */
static int arch_setup_msi_irq_default(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	irq = msm_pcie_create_irq(dev);

	PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);

	if (irq < 0)
		return irq;

	PCIE_DBG(dev, "irq %d allocated\n", irq);

	/* remember the owning pci_dev for teardown via chip_data */
	irq_set_chip_data(irq, pdev);
	irq_set_msi_desc(irq, desc);

	/* write msi vector and data */
	msg.address_hi = 0;
	msg.address_lo = MSM_PCIE_MSI_PHY;
	msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
	write_msi_msg(irq, &msg);

	return 0;
}
4886
4887static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
4888{
4889 int irq, pos;
4890
4891 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4892
4893again:
4894 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
4895
4896 if (pos >= PCIE_MSI_NR_IRQS)
4897 return -ENOSPC;
4898
4899 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
4900
4901 if (test_and_set_bit(pos, dev->msi_irq_in_use))
4902 goto again;
4903 else
4904 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
4905
4906 if (pos >= MSM_PCIE_MAX_MSI) {
4907 PCIE_ERR(dev,
4908 "PCIe: RC%d: pos %d is not less than %d\n",
4909 dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
4910 return MSM_PCIE_ERROR;
4911 }
4912
4913 irq = dev->msi[pos].num;
4914 if (!irq) {
4915 PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
4916 dev->rc_idx);
4917 return -EINVAL;
4918 }
4919
4920 return irq;
4921}
4922
/*
 * Fill in the doorbell address the endpoint must write for QGIC-based
 * MSIs.  With no iommu domain or with stage 1 bypassed, the physical
 * msi_gicm_addr is used directly.  Otherwise a one-page IOMMU mapping
 * to the doorbell page is created — the IOVA is taken from the fastmap
 * aperture start when DOMAIN_ATTR_FAST is set, or from the
 * rounded-down PCIe DBI address — and the resulting IOVA goes into
 * msg->address_lo.  Returns 0 on success, -ENOMEM when the map fails.
 */
static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
					struct pci_dev *pdev,
					struct msi_msg *msg)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
	struct iommu_domain_geometry geometry;
	int ret, fastmap_en = 0, bypass_en = 0;
	dma_addr_t iova;
	phys_addr_t gicm_db_offset;

	/* default: physical doorbell address (stage-1 bypass / no iommu) */
	msg->address_hi = 0;
	msg->address_lo = dev->msi_gicm_addr;

	if (!domain) {
		PCIE_DBG(dev,
			"PCIe: RC%d: client does not have an iommu domain\n",
			dev->rc_idx);
		return 0;
	}

	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);

	PCIE_DBG(dev,
		"PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n",
		dev->rc_idx, bypass_en ? "bypass" : "enabled",
		pdev->bus->number, pdev->devfn);

	if (bypass_en)
		return 0;

	iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap_en);
	if (fastmap_en) {
		iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geometry);
		iova = geometry.aperture_start;
		PCIE_DBG(dev,
			"PCIe: RC%d: Use client's IOVA 0x%llx to map QGIC MSI address\n",
			dev->rc_idx, iova);
	} else {
		phys_addr_t pcie_base_addr;

		/*
		 * Use PCIe DBI address as the IOVA since client cannot
		 * use this address for their IOMMU mapping. This will
		 * prevent any conflicts between PCIe host and
		 * client's mapping.
		 */
		pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
		iova = rounddown(pcie_base_addr, PAGE_SIZE);
	}

	ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
	if (ret < 0) {
		PCIE_ERR(dev,
			"PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
			dev->rc_idx, ret);
		return -ENOMEM;
	}

	/* keep the doorbell's offset within its page */
	gicm_db_offset = dev->msi_gicm_addr -
		rounddown(dev->msi_gicm_addr, PAGE_SIZE);
	msg->address_lo = iova + gicm_db_offset;

	return 0;
}
4988
/*
 * Allocate @nvec QGIC-backed MSI vectors for @pdev: reserve a slot and
 * wire up each vector as edge-rising, attach @desc to the first irq,
 * then program the endpoint's MSI message with the (possibly IOMMU
 * mapped) QGIC doorbell address and the hwirq base as payload.
 * Returns 0 on success or a negative errno.
 */
static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq, index, ret, firstirq = 0;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	for (index = 0; index < nvec; index++) {
		irq = msm_pcie_create_irq_qgic(dev);
		PCIE_DBG(dev, "irq %d is allocated\n", irq);

		if (irq < 0)
			return irq;

		if (index == 0)
			firstirq = irq;

		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
	}

	/* write msi vector and data */
	irq_set_msi_desc(firstirq, desc);

	ret = msm_pcie_map_qgic_addr(dev, pdev, &msg);
	if (ret)
		return ret;

	/* data = GIC SPI number of the first vector in the block */
	msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
	write_msi_msg(firstirq, &msg);

	return 0;
}
5023
5024int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
5025{
5026 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5027
5028 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5029
5030 if (dev->msi_gicm_addr)
5031 return arch_setup_msi_irq_qgic(pdev, desc, 1);
5032 else
5033 return arch_setup_msi_irq_default(pdev, desc, 1);
5034}
5035
/*
 * Return floor(log2(nvec)) — the value the MSI "multiple message"
 * capability field expects.  Yields -1 for nvec == 0, matching the
 * original bit-counting loop.
 */
static int msm_pcie_get_msi_multiple(int nvec)
{
	int log2_nvec = -1;

	for (; nvec; nvec >>= 1)
		log2_nvec++;

	PCIE_GEN_DBG("log2 number of MSI multiple:%d\n",
		log2_nvec);

	return log2_nvec;
}
5049
/*
 * arch hook: allocate @nvec MSI vectors for every msi_desc on @dev.
 * Only PCI_CAP_ID_MSI with nvec <= 32 is supported (MSI-X is rejected
 * with -ENOSPC).  Routes to the QGIC or controller-default allocator
 * depending on msi_gicm_addr, and flags the RC as MSI-capable on
 * success.
 */
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);

	if (type != PCI_CAP_ID_MSI || nvec > 32)
		return -ENOSPC;

	PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);

	list_for_each_entry(entry, &dev->dev.msi_list, list) {
		/* record log2(nvec) so teardown can size the block */
		entry->msi_attrib.multiple =
			msm_pcie_get_msi_multiple(nvec);

		if (pcie_dev->msi_gicm_addr)
			ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
		else
			ret = arch_setup_msi_irq_default(dev, entry, nvec);

		PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);

		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	pcie_dev->use_msi = true;

	return 0;
}
5084
5085static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
5086 irq_hw_number_t hwirq)
5087{
5088 irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
Tony Truong349ee492014-10-01 17:35:56 -07005089 return 0;
5090}
5091
/* Domain ops for the controller MSI irq_domain; only .map is needed. */
static const struct irq_domain_ops msm_pcie_msi_ops = {
	.map = msm_pcie_msi_map,
};
5095
Stephen Boydb5b8fc32017-06-21 08:59:11 -07005096static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
Tony Truong349ee492014-10-01 17:35:56 -07005097{
5098 int rc;
5099 int msi_start = 0;
5100 struct device *pdev = &dev->pdev->dev;
5101
5102 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5103
5104 if (dev->rc_idx)
5105 wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
5106 else
5107 wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
5108
5109 /* register handler for linkdown interrupt */
5110 if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
5111 rc = devm_request_irq(pdev,
5112 dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
5113 handle_linkdown_irq,
5114 IRQF_TRIGGER_RISING,
5115 dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
5116 dev);
5117 if (rc) {
5118 PCIE_ERR(dev,
5119 "PCIe: Unable to request linkdown interrupt:%d\n",
5120 dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
5121 return rc;
5122 }
5123 }
5124
5125 /* register handler for physical MSI interrupt line */
5126 if (dev->irq[MSM_PCIE_INT_MSI].num) {
5127 rc = devm_request_irq(pdev,
5128 dev->irq[MSM_PCIE_INT_MSI].num,
5129 handle_msi_irq,
5130 IRQF_TRIGGER_RISING,
5131 dev->irq[MSM_PCIE_INT_MSI].name,
5132 dev);
5133 if (rc) {
5134 PCIE_ERR(dev,
5135 "PCIe: RC%d: Unable to request MSI interrupt\n",
5136 dev->rc_idx);
5137 return rc;
5138 }
5139 }
5140
5141 /* register handler for AER interrupt */
5142 if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
5143 rc = devm_request_irq(pdev,
5144 dev->irq[MSM_PCIE_INT_PLS_ERR].num,
5145 handle_aer_irq,
5146 IRQF_TRIGGER_RISING,
5147 dev->irq[MSM_PCIE_INT_PLS_ERR].name,
5148 dev);
5149 if (rc) {
5150 PCIE_ERR(dev,
5151 "PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
5152 dev->rc_idx,
5153 dev->irq[MSM_PCIE_INT_PLS_ERR].num);
5154 return rc;
5155 }
5156 }
5157
5158 /* register handler for AER legacy interrupt */
5159 if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
5160 rc = devm_request_irq(pdev,
5161 dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
5162 handle_aer_irq,
5163 IRQF_TRIGGER_RISING,
5164 dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
5165 dev);
5166 if (rc) {
5167 PCIE_ERR(dev,
5168 "PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
5169 dev->rc_idx,
5170 dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
5171 return rc;
5172 }
5173 }
5174
5175 if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
5176 rc = devm_request_irq(pdev,
5177 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
5178 handle_global_irq,
5179 IRQF_TRIGGER_RISING,
5180 dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
5181 dev);
5182 if (rc) {
5183 PCIE_ERR(dev,
5184 "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
5185 dev->rc_idx,
5186 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
5187 return rc;
5188 }
5189 }
5190
5191 /* register handler for PCIE_WAKE_N interrupt line */
5192 if (dev->wake_n) {
5193 rc = devm_request_irq(pdev,
5194 dev->wake_n, handle_wake_irq,
5195 IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
5196 if (rc) {
5197 PCIE_ERR(dev,
5198 "PCIe: RC%d: Unable to request wake interrupt\n",
5199 dev->rc_idx);
5200 return rc;
5201 }
5202
5203 INIT_WORK(&dev->handle_wake_work, handle_wake_func);
5204
5205 rc = enable_irq_wake(dev->wake_n);
5206 if (rc) {
5207 PCIE_ERR(dev,
5208 "PCIe: RC%d: Unable to enable wake interrupt\n",
5209 dev->rc_idx);
5210 return rc;
5211 }
5212 }
5213
5214 /* Create a virtual domain of interrupts */
5215 if (!dev->msi_gicm_addr) {
5216 dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
5217 PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
5218
5219 if (!dev->irq_domain) {
5220 PCIE_ERR(dev,
5221 "PCIe: RC%d: Unable to initialize irq domain\n",
5222 dev->rc_idx);
5223
5224 if (dev->wake_n)
5225 disable_irq(dev->wake_n);
5226
5227 return PTR_ERR(dev->irq_domain);
5228 }
5229
5230 msi_start = irq_create_mapping(dev->irq_domain, 0);
5231 }
5232
5233 return 0;
5234}
5235
Stephen Boydb5b8fc32017-06-21 08:59:11 -07005236static void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
Tony Truong349ee492014-10-01 17:35:56 -07005237{
5238 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5239
5240 wakeup_source_trash(&dev->ws);
5241
5242 if (dev->wake_n)
5243 disable_irq(dev->wake_n);
5244}
5245
Tony Truong7772e692017-04-13 17:03:34 -07005246static int msm_pcie_config_device(struct pci_dev *dev, void *pdev)
5247{
5248 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)pdev;
5249 u8 busnr = dev->bus->number;
5250 u8 slot = PCI_SLOT(dev->devfn);
5251 u8 func = PCI_FUNC(dev->devfn);
5252
5253 PCIE_DBG(pcie_dev, "PCIe: RC%d: configure PCI device %02x:%02x.%01x\n",
5254 pcie_dev->rc_idx, busnr, slot, func);
5255
Tony Truong2a022a02017-04-13 14:04:30 -07005256 msm_pcie_configure_sid(pcie_dev, dev);
5257
Tony Truong7772e692017-04-13 17:03:34 -07005258 return 0;
5259}
5260
5261/* Hook to setup PCI device during PCI framework scan */
5262int pcibios_add_device(struct pci_dev *dev)
5263{
5264 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5265
5266 return msm_pcie_config_device(dev, pcie_dev);
5267}
Tony Truong349ee492014-10-01 17:35:56 -07005268
5269static int msm_pcie_probe(struct platform_device *pdev)
5270{
5271 int ret = 0;
5272 int rc_idx = -1;
5273 int i, j;
5274
5275 PCIE_GEN_DBG("%s\n", __func__);
5276
5277 mutex_lock(&pcie_drv.drv_lock);
5278
5279 ret = of_property_read_u32((&pdev->dev)->of_node,
5280 "cell-index", &rc_idx);
5281 if (ret) {
5282 PCIE_GEN_DBG("Did not find RC index.\n");
5283 goto out;
5284 } else {
5285 if (rc_idx >= MAX_RC_NUM) {
5286 pr_err(
5287 "PCIe: Invalid RC Index %d (max supported = %d)\n",
5288 rc_idx, MAX_RC_NUM);
5289 goto out;
5290 }
5291 pcie_drv.rc_num++;
5292 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
5293 rc_idx);
5294 }
5295
5296 msm_pcie_dev[rc_idx].l0s_supported =
5297 of_property_read_bool((&pdev->dev)->of_node,
5298 "qcom,l0s-supported");
5299 PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
5300 msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
5301 msm_pcie_dev[rc_idx].l1_supported =
5302 of_property_read_bool((&pdev->dev)->of_node,
5303 "qcom,l1-supported");
5304 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
5305 msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
5306 msm_pcie_dev[rc_idx].l1ss_supported =
5307 of_property_read_bool((&pdev->dev)->of_node,
5308 "qcom,l1ss-supported");
5309 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
5310 msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
5311 msm_pcie_dev[rc_idx].common_clk_en =
5312 of_property_read_bool((&pdev->dev)->of_node,
5313 "qcom,common-clk-en");
5314 PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
5315 msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
5316 msm_pcie_dev[rc_idx].clk_power_manage_en =
5317 of_property_read_bool((&pdev->dev)->of_node,
5318 "qcom,clk-power-manage-en");
5319 PCIE_DBG(&msm_pcie_dev[rc_idx],
5320 "Clock power management is %s enabled.\n",
5321 msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
5322 msm_pcie_dev[rc_idx].aux_clk_sync =
5323 of_property_read_bool((&pdev->dev)->of_node,
5324 "qcom,aux-clk-sync");
5325 PCIE_DBG(&msm_pcie_dev[rc_idx],
5326 "AUX clock is %s synchronous to Core clock.\n",
5327 msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
5328
5329 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
5330 of_property_read_bool((&pdev->dev)->of_node,
5331 "qcom,use-19p2mhz-aux-clk");
5332 PCIE_DBG(&msm_pcie_dev[rc_idx],
5333 "AUX clock frequency is %s 19.2MHz.\n",
5334 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
5335
5336 msm_pcie_dev[rc_idx].smmu_exist =
5337 of_property_read_bool((&pdev->dev)->of_node,
5338 "qcom,smmu-exist");
5339 PCIE_DBG(&msm_pcie_dev[rc_idx],
5340 "SMMU does %s exist.\n",
5341 msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
5342
5343 msm_pcie_dev[rc_idx].smmu_sid_base = 0;
5344 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
5345 &msm_pcie_dev[rc_idx].smmu_sid_base);
5346 if (ret)
5347 PCIE_DBG(&msm_pcie_dev[rc_idx],
5348 "RC%d SMMU sid base not found\n",
5349 msm_pcie_dev[rc_idx].rc_idx);
5350 else
5351 PCIE_DBG(&msm_pcie_dev[rc_idx],
5352 "RC%d: qcom,smmu-sid-base: 0x%x.\n",
5353 msm_pcie_dev[rc_idx].rc_idx,
5354 msm_pcie_dev[rc_idx].smmu_sid_base);
5355
Tony Truong9f2c7722017-02-28 15:02:27 -08005356 msm_pcie_dev[rc_idx].boot_option = 0;
5357 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,boot-option",
5358 &msm_pcie_dev[rc_idx].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07005359 PCIE_DBG(&msm_pcie_dev[rc_idx],
Tony Truong9f2c7722017-02-28 15:02:27 -08005360 "PCIe: RC%d boot option is 0x%x.\n",
5361 rc_idx, msm_pcie_dev[rc_idx].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07005362
5363 msm_pcie_dev[rc_idx].phy_ver = 1;
5364 ret = of_property_read_u32((&pdev->dev)->of_node,
5365 "qcom,pcie-phy-ver",
5366 &msm_pcie_dev[rc_idx].phy_ver);
5367 if (ret)
5368 PCIE_DBG(&msm_pcie_dev[rc_idx],
5369 "RC%d: pcie-phy-ver does not exist.\n",
5370 msm_pcie_dev[rc_idx].rc_idx);
5371 else
5372 PCIE_DBG(&msm_pcie_dev[rc_idx],
5373 "RC%d: pcie-phy-ver: %d.\n",
5374 msm_pcie_dev[rc_idx].rc_idx,
5375 msm_pcie_dev[rc_idx].phy_ver);
5376
5377 msm_pcie_dev[rc_idx].n_fts = 0;
5378 ret = of_property_read_u32((&pdev->dev)->of_node,
5379 "qcom,n-fts",
5380 &msm_pcie_dev[rc_idx].n_fts);
5381
5382 if (ret)
5383 PCIE_DBG(&msm_pcie_dev[rc_idx],
5384 "n-fts does not exist. ret=%d\n", ret);
5385 else
5386 PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
5387 msm_pcie_dev[rc_idx].n_fts);
5388
5389 msm_pcie_dev[rc_idx].common_phy =
5390 of_property_read_bool((&pdev->dev)->of_node,
5391 "qcom,common-phy");
5392 PCIE_DBG(&msm_pcie_dev[rc_idx],
5393 "PCIe: RC%d: Common PHY does %s exist.\n",
5394 rc_idx, msm_pcie_dev[rc_idx].common_phy ? "" : "not");
5395
5396 msm_pcie_dev[rc_idx].ext_ref_clk =
5397 of_property_read_bool((&pdev->dev)->of_node,
5398 "qcom,ext-ref-clk");
5399 PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
5400 msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
5401
5402 msm_pcie_dev[rc_idx].ep_latency = 0;
5403 ret = of_property_read_u32((&pdev->dev)->of_node,
5404 "qcom,ep-latency",
5405 &msm_pcie_dev[rc_idx].ep_latency);
5406 if (ret)
5407 PCIE_DBG(&msm_pcie_dev[rc_idx],
5408 "RC%d: ep-latency does not exist.\n",
5409 rc_idx);
5410 else
5411 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
5412 rc_idx, msm_pcie_dev[rc_idx].ep_latency);
5413
5414 msm_pcie_dev[rc_idx].wr_halt_size = 0;
5415 ret = of_property_read_u32(pdev->dev.of_node,
5416 "qcom,wr-halt-size",
5417 &msm_pcie_dev[rc_idx].wr_halt_size);
5418 if (ret)
5419 PCIE_DBG(&msm_pcie_dev[rc_idx],
5420 "RC%d: wr-halt-size not specified in dt. Use default value.\n",
5421 rc_idx);
5422 else
5423 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
5424 rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
5425
5426 msm_pcie_dev[rc_idx].cpl_timeout = 0;
5427 ret = of_property_read_u32((&pdev->dev)->of_node,
5428 "qcom,cpl-timeout",
5429 &msm_pcie_dev[rc_idx].cpl_timeout);
5430 if (ret)
5431 PCIE_DBG(&msm_pcie_dev[rc_idx],
5432 "RC%d: Using default cpl-timeout.\n",
5433 rc_idx);
5434 else
5435 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
5436 rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
5437
5438 msm_pcie_dev[rc_idx].perst_delay_us_min =
5439 PERST_PROPAGATION_DELAY_US_MIN;
5440 ret = of_property_read_u32(pdev->dev.of_node,
5441 "qcom,perst-delay-us-min",
5442 &msm_pcie_dev[rc_idx].perst_delay_us_min);
5443 if (ret)
5444 PCIE_DBG(&msm_pcie_dev[rc_idx],
5445 "RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
5446 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
5447 else
5448 PCIE_DBG(&msm_pcie_dev[rc_idx],
5449 "RC%d: perst-delay-us-min: %dus.\n",
5450 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
5451
5452 msm_pcie_dev[rc_idx].perst_delay_us_max =
5453 PERST_PROPAGATION_DELAY_US_MAX;
5454 ret = of_property_read_u32(pdev->dev.of_node,
5455 "qcom,perst-delay-us-max",
5456 &msm_pcie_dev[rc_idx].perst_delay_us_max);
5457 if (ret)
5458 PCIE_DBG(&msm_pcie_dev[rc_idx],
5459 "RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
5460 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
5461 else
5462 PCIE_DBG(&msm_pcie_dev[rc_idx],
5463 "RC%d: perst-delay-us-max: %dus.\n",
5464 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
5465
5466 msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
5467 ret = of_property_read_u32(pdev->dev.of_node,
5468 "qcom,tlp-rd-size",
5469 &msm_pcie_dev[rc_idx].tlp_rd_size);
5470 if (ret)
5471 PCIE_DBG(&msm_pcie_dev[rc_idx],
5472 "RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
5473 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
5474 else
5475 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
5476 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
5477
5478 msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
5479 msm_pcie_dev[rc_idx].msi_gicm_base = 0;
5480 ret = of_property_read_u32((&pdev->dev)->of_node,
5481 "qcom,msi-gicm-addr",
5482 &msm_pcie_dev[rc_idx].msi_gicm_addr);
5483
5484 if (ret) {
5485 PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
5486 "msi-gicm-addr does not exist.\n");
5487 } else {
5488 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
5489 msm_pcie_dev[rc_idx].msi_gicm_addr);
5490
5491 ret = of_property_read_u32((&pdev->dev)->of_node,
5492 "qcom,msi-gicm-base",
5493 &msm_pcie_dev[rc_idx].msi_gicm_base);
5494
5495 if (ret) {
5496 PCIE_ERR(&msm_pcie_dev[rc_idx],
5497 "PCIe: RC%d: msi-gicm-base does not exist.\n",
5498 rc_idx);
5499 goto decrease_rc_num;
5500 } else {
5501 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
5502 msm_pcie_dev[rc_idx].msi_gicm_base);
5503 }
5504 }
5505
5506 msm_pcie_dev[rc_idx].scm_dev_id = 0;
5507 ret = of_property_read_u32((&pdev->dev)->of_node,
5508 "qcom,scm-dev-id",
5509 &msm_pcie_dev[rc_idx].scm_dev_id);
5510
5511 msm_pcie_dev[rc_idx].rc_idx = rc_idx;
5512 msm_pcie_dev[rc_idx].pdev = pdev;
5513 msm_pcie_dev[rc_idx].vreg_n = 0;
5514 msm_pcie_dev[rc_idx].gpio_n = 0;
5515 msm_pcie_dev[rc_idx].parf_deemph = 0;
5516 msm_pcie_dev[rc_idx].parf_swing = 0;
5517 msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
5518 msm_pcie_dev[rc_idx].user_suspend = false;
5519 msm_pcie_dev[rc_idx].disable_pc = false;
5520 msm_pcie_dev[rc_idx].saved_state = NULL;
5521 msm_pcie_dev[rc_idx].enumerated = false;
5522 msm_pcie_dev[rc_idx].num_active_ep = 0;
5523 msm_pcie_dev[rc_idx].num_ep = 0;
5524 msm_pcie_dev[rc_idx].pending_ep_reg = false;
5525 msm_pcie_dev[rc_idx].phy_len = 0;
5526 msm_pcie_dev[rc_idx].port_phy_len = 0;
5527 msm_pcie_dev[rc_idx].phy_sequence = NULL;
5528 msm_pcie_dev[rc_idx].port_phy_sequence = NULL;
5529 msm_pcie_dev[rc_idx].event_reg = NULL;
5530 msm_pcie_dev[rc_idx].linkdown_counter = 0;
5531 msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
5532 msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
5533 msm_pcie_dev[rc_idx].rc_corr_counter = 0;
5534 msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
5535 msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
5536 msm_pcie_dev[rc_idx].ep_corr_counter = 0;
5537 msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
5538 msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
5539 msm_pcie_dev[rc_idx].suspending = false;
5540 msm_pcie_dev[rc_idx].wake_counter = 0;
5541 msm_pcie_dev[rc_idx].aer_enable = true;
5542 msm_pcie_dev[rc_idx].power_on = false;
Tony Truong349ee492014-10-01 17:35:56 -07005543 msm_pcie_dev[rc_idx].use_msi = false;
5544 msm_pcie_dev[rc_idx].use_pinctrl = false;
5545 msm_pcie_dev[rc_idx].linkdown_panic = false;
5546 msm_pcie_dev[rc_idx].bridge_found = false;
5547 memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
5548 sizeof(msm_pcie_vreg_info));
5549 memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
5550 sizeof(msm_pcie_gpio_info));
5551 memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
5552 sizeof(msm_pcie_clk_info[rc_idx]));
5553 memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
5554 sizeof(msm_pcie_pipe_clk_info[rc_idx]));
5555 memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
5556 sizeof(msm_pcie_res_info));
5557 memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
5558 sizeof(msm_pcie_irq_info));
5559 memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
5560 sizeof(msm_pcie_msi_info));
5561 memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
5562 sizeof(msm_pcie_reset_info[rc_idx]));
5563 memcpy(msm_pcie_dev[rc_idx].pipe_reset,
5564 msm_pcie_pipe_reset_info[rc_idx],
5565 sizeof(msm_pcie_pipe_reset_info[rc_idx]));
5566 msm_pcie_dev[rc_idx].shadow_en = true;
5567 for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
5568 msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
5569 for (i = 0; i < MAX_DEVICE_NUM; i++)
5570 for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
5571 msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
5572 for (i = 0; i < MAX_DEVICE_NUM; i++) {
5573 msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
5574 msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
5575 msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
5576 msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
5577 msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
Stephen Boydb5b8fc32017-06-21 08:59:11 -07005578 msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = NULL;
Tony Truong349ee492014-10-01 17:35:56 -07005579 msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
5580 msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
5581 msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
5582 msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
5583 }
5584
Tony Truongbd9a3412017-02-27 18:30:13 -08005585 dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
Tony Truongbd9a3412017-02-27 18:30:13 -08005586
Tony Truong349ee492014-10-01 17:35:56 -07005587 ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
5588 msm_pcie_dev[rc_idx].pdev);
5589
5590 if (ret)
5591 goto decrease_rc_num;
5592
5593 msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
5594 if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
5595 PCIE_ERR(&msm_pcie_dev[rc_idx],
5596 "PCIe: RC%d failed to get pinctrl\n",
5597 rc_idx);
5598 else
5599 msm_pcie_dev[rc_idx].use_pinctrl = true;
5600
5601 if (msm_pcie_dev[rc_idx].use_pinctrl) {
5602 msm_pcie_dev[rc_idx].pins_default =
5603 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
5604 "default");
5605 if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
5606 PCIE_ERR(&msm_pcie_dev[rc_idx],
5607 "PCIe: RC%d could not get pinctrl default state\n",
5608 rc_idx);
5609 msm_pcie_dev[rc_idx].pins_default = NULL;
5610 }
5611
5612 msm_pcie_dev[rc_idx].pins_sleep =
5613 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
5614 "sleep");
5615 if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
5616 PCIE_ERR(&msm_pcie_dev[rc_idx],
5617 "PCIe: RC%d could not get pinctrl sleep state\n",
5618 rc_idx);
5619 msm_pcie_dev[rc_idx].pins_sleep = NULL;
5620 }
5621 }
5622
5623 ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
5624 if (ret) {
5625 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
5626 goto decrease_rc_num;
5627 }
5628
5629 ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
5630 if (ret) {
5631 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
5632 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
5633 goto decrease_rc_num;
5634 }
5635
Tony Truong14a5ddf2017-04-20 11:04:03 -07005636 msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
5637
Tony Truong349ee492014-10-01 17:35:56 -07005638 msm_pcie_dev[rc_idx].drv_ready = true;
5639
Tony Truong9f2c7722017-02-28 15:02:27 -08005640 if (msm_pcie_dev[rc_idx].boot_option &
5641 MSM_PCIE_NO_PROBE_ENUMERATION) {
Tony Truong349ee492014-10-01 17:35:56 -07005642 PCIE_DBG(&msm_pcie_dev[rc_idx],
Tony Truong9f2c7722017-02-28 15:02:27 -08005643 "PCIe: RC%d will be enumerated by client or endpoint.\n",
Tony Truong349ee492014-10-01 17:35:56 -07005644 rc_idx);
5645 mutex_unlock(&pcie_drv.drv_lock);
5646 return 0;
5647 }
5648
5649 ret = msm_pcie_enumerate(rc_idx);
5650
5651 if (ret)
5652 PCIE_ERR(&msm_pcie_dev[rc_idx],
5653 "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
5654 rc_idx);
5655 else
5656 PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n",
5657 rc_idx);
5658
5659 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n",
5660 dev_name(&(pdev->dev)));
5661
5662 mutex_unlock(&pcie_drv.drv_lock);
5663 return 0;
5664
5665decrease_rc_num:
5666 pcie_drv.rc_num--;
5667out:
5668 if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
5669 pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
5670 rc_idx);
5671 else
5672 PCIE_ERR(&msm_pcie_dev[rc_idx],
5673 "PCIe: Driver probe failed for RC%d:%d\n",
5674 rc_idx, ret);
5675
5676 mutex_unlock(&pcie_drv.drv_lock);
5677
5678 return ret;
5679}
5680
5681static int msm_pcie_remove(struct platform_device *pdev)
5682{
5683 int ret = 0;
5684 int rc_idx;
5685
5686 PCIE_GEN_DBG("PCIe:%s.\n", __func__);
5687
5688 mutex_lock(&pcie_drv.drv_lock);
5689
5690 ret = of_property_read_u32((&pdev->dev)->of_node,
5691 "cell-index", &rc_idx);
5692 if (ret) {
5693 pr_err("%s: Did not find RC index.\n", __func__);
5694 goto out;
5695 } else {
5696 pcie_drv.rc_num--;
5697 PCIE_GEN_DBG("%s: RC index is 0x%x.", __func__, rc_idx);
5698 }
5699
5700 msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
5701 msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
5702 msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
5703 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
5704 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
5705
5706out:
5707 mutex_unlock(&pcie_drv.drv_lock);
5708
5709 return ret;
5710}
5711
/* Device-tree match table: binds nodes with compatible = "qcom,pci-msm". */
static const struct of_device_id msm_pcie_match[] = {
	{	.compatible = "qcom,pci-msm",
	},
	{}
};
5717
/*
 * Platform driver registration for the MSM PCIe root complex.
 * NOTE(review): .owner is redundant — platform_driver_register() sets it.
 */
static struct platform_driver msm_pcie_driver = {
	.probe	= msm_pcie_probe,
	.remove	= msm_pcie_remove,
	.driver	= {
		.name		= "pci-msm",
		.owner		= THIS_MODULE,
		.of_match_table	= msm_pcie_match,
	},
};
5727
Stephen Boydb5b8fc32017-06-21 08:59:11 -07005728static int __init pcie_init(void)
Tony Truong349ee492014-10-01 17:35:56 -07005729{
5730 int ret = 0, i;
5731 char rc_name[MAX_RC_NAME_LEN];
5732
5733 pr_alert("pcie:%s.\n", __func__);
5734
5735 pcie_drv.rc_num = 0;
5736 mutex_init(&pcie_drv.drv_lock);
5737 mutex_init(&com_phy_lock);
5738
5739 for (i = 0; i < MAX_RC_NUM; i++) {
5740 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
5741 msm_pcie_dev[i].ipc_log =
5742 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
5743 if (msm_pcie_dev[i].ipc_log == NULL)
5744 pr_err("%s: unable to create IPC log context for %s\n",
5745 __func__, rc_name);
5746 else
5747 PCIE_DBG(&msm_pcie_dev[i],
5748 "PCIe IPC logging is enable for RC%d\n",
5749 i);
5750 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
5751 msm_pcie_dev[i].ipc_log_long =
5752 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
5753 if (msm_pcie_dev[i].ipc_log_long == NULL)
5754 pr_err("%s: unable to create IPC log context for %s\n",
5755 __func__, rc_name);
5756 else
5757 PCIE_DBG(&msm_pcie_dev[i],
5758 "PCIe IPC logging %s is enable for RC%d\n",
5759 rc_name, i);
5760 snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
5761 msm_pcie_dev[i].ipc_log_dump =
5762 ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
5763 if (msm_pcie_dev[i].ipc_log_dump == NULL)
5764 pr_err("%s: unable to create IPC log context for %s\n",
5765 __func__, rc_name);
5766 else
5767 PCIE_DBG(&msm_pcie_dev[i],
5768 "PCIe IPC logging %s is enable for RC%d\n",
5769 rc_name, i);
5770 spin_lock_init(&msm_pcie_dev[i].cfg_lock);
5771 msm_pcie_dev[i].cfg_access = true;
5772 mutex_init(&msm_pcie_dev[i].enumerate_lock);
5773 mutex_init(&msm_pcie_dev[i].setup_lock);
5774 mutex_init(&msm_pcie_dev[i].recovery_lock);
5775 spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
5776 spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
5777 spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
5778 spin_lock_init(&msm_pcie_dev[i].aer_lock);
5779 msm_pcie_dev[i].drv_ready = false;
5780 }
5781 for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
5782 msm_pcie_dev_tbl[i].bdf = 0;
5783 msm_pcie_dev_tbl[i].dev = NULL;
5784 msm_pcie_dev_tbl[i].short_bdf = 0;
5785 msm_pcie_dev_tbl[i].sid = 0;
5786 msm_pcie_dev_tbl[i].domain = -1;
Stephen Boydb5b8fc32017-06-21 08:59:11 -07005787 msm_pcie_dev_tbl[i].conf_base = NULL;
Tony Truong349ee492014-10-01 17:35:56 -07005788 msm_pcie_dev_tbl[i].phy_address = 0;
5789 msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
5790 msm_pcie_dev_tbl[i].event_reg = NULL;
5791 msm_pcie_dev_tbl[i].registered = true;
5792 }
5793
5794 msm_pcie_debugfs_init();
5795
5796 ret = platform_driver_register(&msm_pcie_driver);
5797
5798 return ret;
5799}
5800
/*
 * Module exit: unregister the driver, then tear down debugfs and the
 * per-RC sysfs entries created during probe.
 */
static void __exit pcie_exit(void)
{
	int i;

	PCIE_GEN_DBG("pcie:%s.\n", __func__);

	platform_driver_unregister(&msm_pcie_driver);

	msm_pcie_debugfs_exit();

	for (i = 0; i < MAX_RC_NUM; i++)
		msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
}
5814
/* Init late in subsys stage so clock/regulator/bus providers are ready. */
subsys_initcall_sync(pcie_init);
module_exit(pcie_exit);
5817
5818
/* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
static void msm_pcie_fixup_early(struct pci_dev *dev)
{
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
	/* type-1 (bridge) headers: keep prog-if, force bridge class code */
	if (dev->hdr_type == 1)
		dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
}
DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
			msm_pcie_fixup_early);
5830
/* Suspend the PCIe link */
static int msm_pcie_pm_suspend(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret = 0;
	u32 val = 0;
	int ret_l23;
	unsigned long irqsave_flags;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* flag suspend-in-progress so the AER handler can ignore noise */
	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
	pcie_dev->suspending = true;
	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);

	/* nothing to do if the RC is already powered off */
	if (!pcie_dev->power_on) {
		PCIE_DBG(pcie_dev,
			"PCIe: power of RC%d has been turned off.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	/*
	 * Save config space only when the caller wants it restored later
	 * and the link is confirmed up (a dead link would return junk).
	 */
	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
			pcie_dev->conf)) {
		ret = pci_save_state(dev);
		pcie_dev->saved_state =	pci_store_saved_state(dev);
	}
	if (ret) {
		PCIE_ERR(pcie_dev, "PCIe: fail to save state of RC%d:%d.\n",
			pcie_dev->rc_idx, ret);
		pcie_dev->suspending = false;
		return ret;
	}

	/* block further config-space access while the link is down */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = false;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* send PME_Turn_Off via the ELBI system control register */
	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
				BIT(4));

	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
		pcie_dev->rc_idx);

	/* poll up to 100ms for the endpoint's PM_Enter_L23 acknowledge */
	ret_l23 = readl_poll_timeout((pcie_dev->parf
		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);

	/* check L23_Ready */
	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
		pcie_dev->rc_idx,
		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
	if (!ret_l23)
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
			pcie_dev->rc_idx);

	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_sleep);

	/* finally drop pipe clock, core clocks and regulators */
	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
5903
5904static void msm_pcie_fixup_suspend(struct pci_dev *dev)
5905{
5906 int ret;
5907 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5908
5909 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
5910
5911 if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
5912 return;
5913
5914 spin_lock_irqsave(&pcie_dev->cfg_lock,
5915 pcie_dev->irqsave_flags);
5916 if (pcie_dev->disable_pc) {
5917 PCIE_DBG(pcie_dev,
5918 "RC%d: Skip suspend because of user request\n",
5919 pcie_dev->rc_idx);
5920 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
5921 pcie_dev->irqsave_flags);
5922 return;
5923 }
5924 spin_unlock_irqrestore(&pcie_dev->cfg_lock,
5925 pcie_dev->irqsave_flags);
5926
5927 mutex_lock(&pcie_dev->recovery_lock);
5928
5929 ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
5930 if (ret)
5931 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
5932 pcie_dev->rc_idx, ret);
5933
5934 mutex_unlock(&pcie_dev->recovery_lock);
5935}
5936DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
5937 msm_pcie_fixup_suspend);
5938
/* Resume the PCIe link */
static int msm_pcie_pm_resume(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* restore the active pin configuration before touching hardware */
	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_default);

	/* re-allow config-space access that suspend blocked */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = true;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* power rails, clocks and pipe clock back on; retrains the link */
	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
	if (ret) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d fail to enable PCIe link in resume.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	pcie_dev->suspending = false;
	PCIE_DBG(pcie_dev,
		"dev->bus->number = %d dev->bus->primary = %d\n",
		 dev->bus->number, dev->bus->primary);

	/* restore the config space captured in msm_pcie_pm_suspend() */
	if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCI framework restore state\n",
			pcie_dev->rc_idx);

		pci_load_and_free_saved_state(dev,
			&pcie_dev->saved_state);
		pci_restore_state(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCI framework restore state\n",
			pcie_dev->rc_idx);
	}

	if (pcie_dev->bridge_found) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCIe recover config\n",
			pcie_dev->rc_idx);

		msm_pcie_recover_config(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCIe recover config\n",
			pcie_dev->rc_idx);
	}

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6001
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006002static void msm_pcie_fixup_resume(struct pci_dev *dev)
Tony Truong349ee492014-10-01 17:35:56 -07006003{
6004 int ret;
6005 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6006
6007 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6008
6009 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6010 pcie_dev->user_suspend)
6011 return;
6012
6013 mutex_lock(&pcie_dev->recovery_lock);
6014 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6015 if (ret)
6016 PCIE_ERR(pcie_dev,
6017 "PCIe: RC%d got failure in fixup resume:%d.\n",
6018 pcie_dev->rc_idx, ret);
6019
6020 mutex_unlock(&pcie_dev->recovery_lock);
6021}
6022DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6023 msm_pcie_fixup_resume);
6024
Stephen Boydb5b8fc32017-06-21 08:59:11 -07006025static void msm_pcie_fixup_resume_early(struct pci_dev *dev)
Tony Truong349ee492014-10-01 17:35:56 -07006026{
6027 int ret;
6028 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6029
6030 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6031
6032 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6033 pcie_dev->user_suspend)
6034 return;
6035
6036 mutex_lock(&pcie_dev->recovery_lock);
6037 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6038 if (ret)
6039 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
6040 pcie_dev->rc_idx, ret);
6041
6042 mutex_unlock(&pcie_dev->recovery_lock);
6043}
6044DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6045 msm_pcie_fixup_resume_early);
6046
/*
 * msm_pcie_pm_control - client entry point for PCIe link power management.
 * @pm_opt:  requested operation (suspend, resume, disable/enable power
 *           collapse).
 * @busnr:   expected bus number of the endpoint; 0 skips the table check.
 * @user:    the client's struct pci_dev, used to locate the owning RC.
 * @data:    opaque client data forwarded to the suspend/resume handlers.
 * @options: client-specific option flags forwarded to the handlers.
 *
 * Returns 0 on success, -ENODEV / -EPROBE_DEFER / MSM_PCIE_ERROR on
 * failure.
 */
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
			void *data, u32 options)
{
	int i, ret = 0;
	struct pci_dev *dev;
	u32 rc_idx = 0;
	struct msm_pcie_dev_t *pcie_dev;

	PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
		pm_opt, busnr, options);


	if (!user) {
		pr_err("PCIe: endpoint device is NULL\n");
		ret = -ENODEV;
		goto out;
	}

	/* Resolve the root complex that owns this endpoint's bus. */
	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);

	if (pcie_dev) {
		rc_idx = pcie_dev->rc_idx;
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
			rc_idx, pm_opt, busnr, options);
	} else {
		pr_err(
			"PCIe: did not find RC for pci endpoint device.\n"
			);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * When the caller supplies a bus number, cross-check it against the
	 * bus recorded for this endpoint in the RC's device table (bdf is
	 * encoded with the bus number in bits 31:24).
	 */
	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (!busnr)
			break;
		if (user == pcie_dev->pcidev_table[i].dev) {
			if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
				break;

			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: bus number %d does not match with the expected value %d\n",
				pcie_dev->rc_idx, busnr,
				pcie_dev->pcidev_table[i].bdf >> 24);
			ret = MSM_PCIE_ERROR;
			goto out;
		}
	}

	/* busnr was given but the endpoint never matched any table entry. */
	if (i == MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: endpoint device was not found in device table",
			pcie_dev->rc_idx);
		ret = MSM_PCIE_ERROR;
		goto out;
	}

	dev = msm_pcie_dev[rc_idx].dev;

	if (!msm_pcie_dev[rc_idx].drv_ready) {
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"RC%d has not been successfully probed yet\n",
			rc_idx);
		return -EPROBE_DEFER;
	}

	switch (pm_opt) {
	case MSM_PCIE_SUSPEND:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to suspend the link\n", rc_idx);
		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);

		/* Nothing to suspend if the link is already powered down. */
		if (!msm_pcie_dev[rc_idx].power_on) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status);
			break;
		}

		/* Reject suspend while endpoint registration is pending. */
		if (msm_pcie_dev[rc_idx].pending_ep_reg) {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: request to suspend the link is rejected\n",
				rc_idx);
			break;
		}

		/*
		 * On multi-EP root complexes the link stays up until the
		 * last active endpoint asks to suspend.
		 */
		if (pcie_dev->num_active_ep) {
			PCIE_DBG(pcie_dev,
				"RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
				pcie_dev->rc_idx, pcie_dev->num_active_ep);
			return ret;
		}

		/* Mark as client-initiated so the PM fixups stay hands-off. */
		msm_pcie_dev[rc_idx].user_suspend = true;

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);

		ret = msm_pcie_pm_suspend(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to suspend the link.\n",
				rc_idx);
			/* Suspend failed: clear the flag set above. */
			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
		break;
	case MSM_PCIE_RESUME:
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to resume the link\n", rc_idx);
		/* Resume is only meaningful from the fully-disabled state. */
		if (msm_pcie_dev[rc_idx].link_status !=
					MSM_PCIE_LINK_DISABLED) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
				rc_idx, msm_pcie_dev[rc_idx].link_status,
				msm_pcie_dev[rc_idx].num_active_ep);
			break;
		}

		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
		ret = msm_pcie_pm_resume(dev, user, data, options);
		if (ret) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user failed to resume the link.\n",
				rc_idx);
		} else {
			PCIE_DBG(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d: user succeeded to resume the link.\n",
				rc_idx);

			/* Link is back up; PM fixups may manage it again. */
			msm_pcie_dev[rc_idx].user_suspend = false;
		}

		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);

		break;
	case MSM_PCIE_DISABLE_PC:
		/*
		 * Keep the link alive: veto power collapse unless a suspend
		 * is already in flight (flag is guarded by cfg_lock since it
		 * is also read from atomic context).
		 */
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d requests to keep the link always alive.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		if (msm_pcie_dev[rc_idx].suspending) {
			PCIE_ERR(&msm_pcie_dev[rc_idx],
				"PCIe: RC%d Link has been suspended before request\n",
				rc_idx);
			ret = MSM_PCIE_ERROR;
		} else {
			msm_pcie_dev[rc_idx].disable_pc = true;
		}
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	case MSM_PCIE_ENABLE_PC:
		/* Cancel a previous MSM_PCIE_DISABLE_PC request. */
		PCIE_DBG(&msm_pcie_dev[rc_idx],
			"User of RC%d cancels the request of alive link.\n",
			rc_idx);
		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		msm_pcie_dev[rc_idx].disable_pc = false;
		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
				msm_pcie_dev[rc_idx].irqsave_flags);
		break;
	default:
		PCIE_ERR(&msm_pcie_dev[rc_idx],
			"PCIe: RC%d: unsupported pm operation:%d.\n",
			rc_idx, pm_opt);
		ret = -ENODEV;
		goto out;
	}

out:
	return ret;
}
EXPORT_SYMBOL(msm_pcie_pm_control);
6225
6226int msm_pcie_register_event(struct msm_pcie_register_event *reg)
6227{
6228 int i, ret = 0;
6229 struct msm_pcie_dev_t *pcie_dev;
6230
6231 if (!reg) {
6232 pr_err("PCIe: Event registration is NULL\n");
6233 return -ENODEV;
6234 }
6235
6236 if (!reg->user) {
6237 pr_err("PCIe: User of event registration is NULL\n");
6238 return -ENODEV;
6239 }
6240
6241 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6242
6243 if (!pcie_dev) {
6244 PCIE_ERR(pcie_dev, "%s",
6245 "PCIe: did not find RC for pci endpoint device.\n");
6246 return -ENODEV;
6247 }
6248
6249 if (pcie_dev->num_ep > 1) {
6250 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6251 if (reg->user ==
6252 pcie_dev->pcidev_table[i].dev) {
6253 pcie_dev->event_reg =
6254 pcie_dev->pcidev_table[i].event_reg;
6255
6256 if (!pcie_dev->event_reg) {
6257 pcie_dev->pcidev_table[i].registered =
6258 true;
6259
6260 pcie_dev->num_active_ep++;
6261 PCIE_DBG(pcie_dev,
6262 "PCIe: RC%d: number of active EP(s): %d.\n",
6263 pcie_dev->rc_idx,
6264 pcie_dev->num_active_ep);
6265 }
6266
6267 pcie_dev->event_reg = reg;
6268 pcie_dev->pcidev_table[i].event_reg = reg;
6269 PCIE_DBG(pcie_dev,
6270 "Event 0x%x is registered for RC %d\n",
6271 reg->events,
6272 pcie_dev->rc_idx);
6273
6274 break;
6275 }
6276 }
6277
6278 if (pcie_dev->pending_ep_reg) {
6279 for (i = 0; i < MAX_DEVICE_NUM; i++)
6280 if (!pcie_dev->pcidev_table[i].registered)
6281 break;
6282
6283 if (i == MAX_DEVICE_NUM)
6284 pcie_dev->pending_ep_reg = false;
6285 }
6286 } else {
6287 pcie_dev->event_reg = reg;
6288 PCIE_DBG(pcie_dev,
6289 "Event 0x%x is registered for RC %d\n", reg->events,
6290 pcie_dev->rc_idx);
6291 }
6292
6293 return ret;
6294}
6295EXPORT_SYMBOL(msm_pcie_register_event);
6296
6297int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
6298{
6299 int i, ret = 0;
6300 struct msm_pcie_dev_t *pcie_dev;
6301
6302 if (!reg) {
6303 pr_err("PCIe: Event deregistration is NULL\n");
6304 return -ENODEV;
6305 }
6306
6307 if (!reg->user) {
6308 pr_err("PCIe: User of event deregistration is NULL\n");
6309 return -ENODEV;
6310 }
6311
6312 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6313
6314 if (!pcie_dev) {
6315 PCIE_ERR(pcie_dev, "%s",
6316 "PCIe: did not find RC for pci endpoint device.\n");
6317 return -ENODEV;
6318 }
6319
6320 if (pcie_dev->num_ep > 1) {
6321 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6322 if (reg->user == pcie_dev->pcidev_table[i].dev) {
6323 if (pcie_dev->pcidev_table[i].event_reg) {
6324 pcie_dev->num_active_ep--;
6325 PCIE_DBG(pcie_dev,
6326 "PCIe: RC%d: number of active EP(s) left: %d.\n",
6327 pcie_dev->rc_idx,
6328 pcie_dev->num_active_ep);
6329 }
6330
6331 pcie_dev->event_reg = NULL;
6332 pcie_dev->pcidev_table[i].event_reg = NULL;
6333 PCIE_DBG(pcie_dev,
6334 "Event is deregistered for RC %d\n",
6335 pcie_dev->rc_idx);
6336
6337 break;
6338 }
6339 }
6340 } else {
6341 pcie_dev->event_reg = NULL;
6342 PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
6343 pcie_dev->rc_idx);
6344 }
6345
6346 return ret;
6347}
6348EXPORT_SYMBOL(msm_pcie_deregister_event);
6349
6350int msm_pcie_recover_config(struct pci_dev *dev)
6351{
6352 int ret = 0;
6353 struct msm_pcie_dev_t *pcie_dev;
6354
6355 if (dev) {
6356 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6357 PCIE_DBG(pcie_dev,
6358 "Recovery for the link of RC%d\n", pcie_dev->rc_idx);
6359 } else {
6360 pr_err("PCIe: the input pci dev is NULL.\n");
6361 return -ENODEV;
6362 }
6363
6364 if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
6365 PCIE_DBG(pcie_dev,
6366 "Recover config space of RC%d and its EP\n",
6367 pcie_dev->rc_idx);
6368 pcie_dev->shadow_en = false;
6369 PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
6370 msm_pcie_cfg_recover(pcie_dev, true);
6371 PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
6372 msm_pcie_cfg_recover(pcie_dev, false);
6373 PCIE_DBG(pcie_dev,
6374 "Refreshing the saved config space in PCI framework for RC%d and its EP\n",
6375 pcie_dev->rc_idx);
6376 pci_save_state(pcie_dev->dev);
6377 pci_save_state(dev);
6378 pcie_dev->shadow_en = true;
6379 PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
6380 pcie_dev->rc_idx);
6381 } else {
6382 PCIE_ERR(pcie_dev,
6383 "PCIe: the link of RC%d is not up yet; can't recover config space.\n",
6384 pcie_dev->rc_idx);
6385 ret = -ENODEV;
6386 }
6387
6388 return ret;
6389}
6390EXPORT_SYMBOL(msm_pcie_recover_config);
6391
6392int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
6393{
6394 int ret = 0;
6395 struct msm_pcie_dev_t *pcie_dev;
6396
6397 if (dev) {
6398 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6399 PCIE_DBG(pcie_dev,
6400 "User requests to %s shadow\n",
6401 enable ? "enable" : "disable");
6402 } else {
6403 pr_err("PCIe: the input pci dev is NULL.\n");
6404 return -ENODEV;
6405 }
6406
6407 PCIE_DBG(pcie_dev,
6408 "The shadowing of RC%d is %s enabled currently.\n",
6409 pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not");
6410
6411 pcie_dev->shadow_en = enable;
6412
6413 PCIE_DBG(pcie_dev,
6414 "Shadowing of RC%d is turned %s upon user's request.\n",
6415 pcie_dev->rc_idx, enable ? "on" : "off");
6416
6417 return ret;
6418}
6419EXPORT_SYMBOL(msm_pcie_shadow_control);