blob: 292b17604fb9b9f750bde511e103862a531b01f7 [file] [log] [blame]
Tony Truong349ee492014-10-01 17:35:56 -07001/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * MSM PCIe controller driver.
15 */
16
17#include <linux/module.h>
18#include <linux/bitops.h>
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
22#include <linux/gpio.h>
23#include <linux/iopoll.h>
24#include <linux/kernel.h>
25#include <linux/of_pci.h>
26#include <linux/pci.h>
Tony Truong52122a62017-03-23 18:00:34 -070027#include <linux/iommu.h>
Tony Truong349ee492014-10-01 17:35:56 -070028#include <linux/platform_device.h>
29#include <linux/regulator/consumer.h>
Tony Truongb213ac12017-04-05 15:21:20 -070030#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
Tony Truong349ee492014-10-01 17:35:56 -070031#include <linux/slab.h>
32#include <linux/types.h>
33#include <linux/of_gpio.h>
Tony Truongb213ac12017-04-05 15:21:20 -070034#include <linux/clk/qcom.h>
Tony Truong349ee492014-10-01 17:35:56 -070035#include <linux/reset.h>
36#include <linux/msm-bus.h>
37#include <linux/msm-bus-board.h>
38#include <linux/debugfs.h>
39#include <linux/uaccess.h>
40#include <linux/io.h>
41#include <linux/msi.h>
42#include <linux/interrupt.h>
43#include <linux/irq.h>
44#include <linux/irqdomain.h>
45#include <linux/pm_wakeup.h>
46#include <linux/compiler.h>
47#include <soc/qcom/scm.h>
48#include <linux/ipc_logging.h>
49#include <linux/msm_pcie.h>
50
Tony Truongb213ac12017-04-05 15:21:20 -070051#define PCIE_VENDOR_ID_RCP 0x17cb
52#define PCIE_DEVICE_ID_RCP 0x0106
53
54#define PCIE20_L1SUB_CONTROL1 0x1E4
55#define PCIE20_PARF_DBI_BASE_ADDR 0x350
56#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
57
Tony Truongb213ac12017-04-05 15:21:20 -070058#define PCS_BASE 0x800
Tony Truongb213ac12017-04-05 15:21:20 -070059
Tony Truong349ee492014-10-01 17:35:56 -070060#define PCS_PORT(n, m) (PCS_BASE + n * m * 0x1000)
Tony Truong349ee492014-10-01 17:35:56 -070061
62#define PCIE_N_SW_RESET(n, m) (PCS_PORT(n, m) + 0x00)
63#define PCIE_N_POWER_DOWN_CONTROL(n, m) (PCS_PORT(n, m) + 0x04)
Tony Truong349ee492014-10-01 17:35:56 -070064#define PCIE_N_PCS_STATUS(n, m) (PCS_PORT(n, m) + 0x174)
Tony Truong349ee492014-10-01 17:35:56 -070065
66#define PCIE_COM_SW_RESET 0x400
67#define PCIE_COM_POWER_DOWN_CONTROL 0x404
Tony Truong349ee492014-10-01 17:35:56 -070068#define PCIE_COM_PCS_READY_STATUS 0x448
Tony Truong349ee492014-10-01 17:35:56 -070069
70#define PCIE20_PARF_SYS_CTRL 0x00
Tony Truongb213ac12017-04-05 15:21:20 -070071#define PCIE20_PARF_PM_CTRL 0x20
Tony Truong349ee492014-10-01 17:35:56 -070072#define PCIE20_PARF_PM_STTS 0x24
73#define PCIE20_PARF_PCS_DEEMPH 0x34
74#define PCIE20_PARF_PCS_SWING 0x38
75#define PCIE20_PARF_PHY_CTRL 0x40
76#define PCIE20_PARF_PHY_REFCLK 0x4C
77#define PCIE20_PARF_CONFIG_BITS 0x50
78#define PCIE20_PARF_TEST_BUS 0xE4
79#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
80#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x1A8
81#define PCIE20_PARF_LTSSM 0x1B0
82#define PCIE20_PARF_INT_ALL_STATUS 0x224
83#define PCIE20_PARF_INT_ALL_CLEAR 0x228
84#define PCIE20_PARF_INT_ALL_MASK 0x22C
85#define PCIE20_PARF_SID_OFFSET 0x234
86#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
87#define PCIE20_PARF_BDF_TRANSLATE_N 0x250
Tony Truongb213ac12017-04-05 15:21:20 -070088#define PCIE20_PARF_DEVICE_TYPE 0x1000
Tony Truong349ee492014-10-01 17:35:56 -070089
90#define PCIE20_ELBI_VERSION 0x00
91#define PCIE20_ELBI_SYS_CTRL 0x04
92#define PCIE20_ELBI_SYS_STTS 0x08
93
94#define PCIE20_CAP 0x70
95#define PCIE20_CAP_DEVCTRLSTATUS (PCIE20_CAP + 0x08)
96#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
97
98#define PCIE20_COMMAND_STATUS 0x04
99#define PCIE20_HEADER_TYPE 0x0C
100#define PCIE20_BUSNUMBERS 0x18
101#define PCIE20_MEMORY_BASE_LIMIT 0x20
102#define PCIE20_BRIDGE_CTRL 0x3C
103#define PCIE20_DEVICE_CONTROL_STATUS 0x78
104#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
105
106#define PCIE20_AUX_CLK_FREQ_REG 0xB40
107#define PCIE20_ACK_F_ASPM_CTRL_REG 0x70C
108#define PCIE20_ACK_N_FTS 0xff00
109
110#define PCIE20_PLR_IATU_VIEWPORT 0x900
111#define PCIE20_PLR_IATU_CTRL1 0x904
112#define PCIE20_PLR_IATU_CTRL2 0x908
113#define PCIE20_PLR_IATU_LBAR 0x90C
114#define PCIE20_PLR_IATU_UBAR 0x910
115#define PCIE20_PLR_IATU_LAR 0x914
116#define PCIE20_PLR_IATU_LTAR 0x918
117#define PCIE20_PLR_IATU_UTAR 0x91c
118
119#define PCIE20_CTRL1_TYPE_CFG0 0x04
120#define PCIE20_CTRL1_TYPE_CFG1 0x05
121
122#define PCIE20_CAP_ID 0x10
123#define L1SUB_CAP_ID 0x1E
124
125#define PCIE_CAP_PTR_OFFSET 0x34
126#define PCIE_EXT_CAP_OFFSET 0x100
127
128#define PCIE20_AER_UNCORR_ERR_STATUS_REG 0x104
129#define PCIE20_AER_CORR_ERR_STATUS_REG 0x110
130#define PCIE20_AER_ROOT_ERR_STATUS_REG 0x130
131#define PCIE20_AER_ERR_SRC_ID_REG 0x134
132
133#define RD 0
134#define WR 1
135#define MSM_PCIE_ERROR -1
136
137#define PERST_PROPAGATION_DELAY_US_MIN 1000
138#define PERST_PROPAGATION_DELAY_US_MAX 1005
139#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
140#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
141#define LINK_UP_TIMEOUT_US_MIN 5000
142#define LINK_UP_TIMEOUT_US_MAX 5100
143#define LINK_UP_CHECK_MAX_COUNT 20
144#define PHY_STABILIZATION_DELAY_US_MIN 995
145#define PHY_STABILIZATION_DELAY_US_MAX 1005
146#define POWER_DOWN_DELAY_US_MIN 10
147#define POWER_DOWN_DELAY_US_MAX 11
148#define LINKDOWN_INIT_WAITING_US_MIN 995
149#define LINKDOWN_INIT_WAITING_US_MAX 1005
150#define LINKDOWN_WAITING_US_MIN 4900
151#define LINKDOWN_WAITING_US_MAX 5100
152#define LINKDOWN_WAITING_COUNT 200
153
154#define PHY_READY_TIMEOUT_COUNT 10
155#define XMLH_LINK_UP 0x400
156#define MAX_LINK_RETRIES 5
157#define MAX_BUS_NUM 3
158#define MAX_PROP_SIZE 32
159#define MAX_RC_NAME_LEN 15
160#define MSM_PCIE_MAX_VREG 4
Tony Truongb213ac12017-04-05 15:21:20 -0700161#define MSM_PCIE_MAX_CLK 12
Tony Truong349ee492014-10-01 17:35:56 -0700162#define MSM_PCIE_MAX_PIPE_CLK 1
163#define MAX_RC_NUM 3
164#define MAX_DEVICE_NUM 20
165#define MAX_SHORT_BDF_NUM 16
166#define PCIE_TLP_RD_SIZE 0x5
167#define PCIE_MSI_NR_IRQS 256
168#define MSM_PCIE_MAX_MSI 32
169#define MAX_MSG_LEN 80
170#define PCIE_LOG_PAGES (50)
171#define PCIE_CONF_SPACE_DW 1024
172#define PCIE_CLEAR 0xDEADBEEF
173#define PCIE_LINK_DOWN 0xFFFFFFFF
174
Tony Truongb213ac12017-04-05 15:21:20 -0700175#define MSM_PCIE_MAX_RESET 5
Tony Truong349ee492014-10-01 17:35:56 -0700176#define MSM_PCIE_MAX_PIPE_RESET 1
177
178#define MSM_PCIE_MSI_PHY 0xa0000000
179#define PCIE20_MSI_CTRL_ADDR (0x820)
180#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
181#define PCIE20_MSI_CTRL_INTR_EN (0x828)
182#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
183#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
184#define PCIE20_MSI_CTRL_MAX 8
185
186/* PM control options */
187#define PM_IRQ 0x1
188#define PM_CLK 0x2
189#define PM_GPIO 0x4
190#define PM_VREG 0x8
191#define PM_PIPE_CLK 0x10
192#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)
193
194#ifdef CONFIG_PHYS_ADDR_T_64BIT
195#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
196#else
197#define PCIE_UPPER_ADDR(addr) (0x0)
198#endif
199#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
200
201/* Config Space Offsets */
202#define BDF_OFFSET(bus, devfn) \
203 ((bus << 24) | (devfn << 16))
204
205#define PCIE_GEN_DBG(x...) do { \
206 if (msm_pcie_debug_mask) \
207 pr_alert(x); \
208 } while (0)
209
210#define PCIE_DBG(dev, fmt, arg...) do { \
211 if ((dev) && (dev)->ipc_log_long) \
212 ipc_log_string((dev)->ipc_log_long, \
213 "DBG1:%s: " fmt, __func__, arg); \
214 if ((dev) && (dev)->ipc_log) \
215 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
216 if (msm_pcie_debug_mask) \
217 pr_alert("%s: " fmt, __func__, arg); \
218 } while (0)
219
220#define PCIE_DBG2(dev, fmt, arg...) do { \
221 if ((dev) && (dev)->ipc_log) \
222 ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
223 if (msm_pcie_debug_mask) \
224 pr_alert("%s: " fmt, __func__, arg); \
225 } while (0)
226
227#define PCIE_DBG3(dev, fmt, arg...) do { \
228 if ((dev) && (dev)->ipc_log) \
229 ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
230 if (msm_pcie_debug_mask) \
231 pr_alert("%s: " fmt, __func__, arg); \
232 } while (0)
233
234#define PCIE_DUMP(dev, fmt, arg...) do { \
235 if ((dev) && (dev)->ipc_log_dump) \
236 ipc_log_string((dev)->ipc_log_dump, \
237 "DUMP:%s: " fmt, __func__, arg); \
238 } while (0)
239
240#define PCIE_DBG_FS(dev, fmt, arg...) do { \
241 if ((dev) && (dev)->ipc_log_dump) \
242 ipc_log_string((dev)->ipc_log_dump, \
243 "DBG_FS:%s: " fmt, __func__, arg); \
244 pr_alert("%s: " fmt, __func__, arg); \
245 } while (0)
246
247#define PCIE_INFO(dev, fmt, arg...) do { \
248 if ((dev) && (dev)->ipc_log_long) \
249 ipc_log_string((dev)->ipc_log_long, \
250 "INFO:%s: " fmt, __func__, arg); \
251 if ((dev) && (dev)->ipc_log) \
252 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
253 pr_info("%s: " fmt, __func__, arg); \
254 } while (0)
255
256#define PCIE_ERR(dev, fmt, arg...) do { \
257 if ((dev) && (dev)->ipc_log_long) \
258 ipc_log_string((dev)->ipc_log_long, \
259 "ERR:%s: " fmt, __func__, arg); \
260 if ((dev) && (dev)->ipc_log) \
261 ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
262 pr_err("%s: " fmt, __func__, arg); \
263 } while (0)
264
265
266enum msm_pcie_res {
267 MSM_PCIE_RES_PARF,
268 MSM_PCIE_RES_PHY,
269 MSM_PCIE_RES_DM_CORE,
270 MSM_PCIE_RES_ELBI,
271 MSM_PCIE_RES_CONF,
272 MSM_PCIE_RES_IO,
273 MSM_PCIE_RES_BARS,
274 MSM_PCIE_RES_TCSR,
275 MSM_PCIE_MAX_RES,
276};
277
278enum msm_pcie_irq {
279 MSM_PCIE_INT_MSI,
280 MSM_PCIE_INT_A,
281 MSM_PCIE_INT_B,
282 MSM_PCIE_INT_C,
283 MSM_PCIE_INT_D,
284 MSM_PCIE_INT_PLS_PME,
285 MSM_PCIE_INT_PME_LEGACY,
286 MSM_PCIE_INT_PLS_ERR,
287 MSM_PCIE_INT_AER_LEGACY,
288 MSM_PCIE_INT_LINK_UP,
289 MSM_PCIE_INT_LINK_DOWN,
290 MSM_PCIE_INT_BRIDGE_FLUSH_N,
291 MSM_PCIE_INT_GLOBAL_INT,
292 MSM_PCIE_MAX_IRQ,
293};
294
295enum msm_pcie_irq_event {
296 MSM_PCIE_INT_EVT_LINK_DOWN = 1,
297 MSM_PCIE_INT_EVT_BME,
298 MSM_PCIE_INT_EVT_PM_TURNOFF,
299 MSM_PCIE_INT_EVT_DEBUG,
300 MSM_PCIE_INT_EVT_LTR,
301 MSM_PCIE_INT_EVT_MHI_Q6,
302 MSM_PCIE_INT_EVT_MHI_A7,
303 MSM_PCIE_INT_EVT_DSTATE_CHANGE,
304 MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
305 MSM_PCIE_INT_EVT_MMIO_WRITE,
306 MSM_PCIE_INT_EVT_CFG_WRITE,
307 MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
308 MSM_PCIE_INT_EVT_LINK_UP,
309 MSM_PCIE_INT_EVT_AER_LEGACY,
310 MSM_PCIE_INT_EVT_AER_ERR,
311 MSM_PCIE_INT_EVT_PME_LEGACY,
312 MSM_PCIE_INT_EVT_PLS_PME,
313 MSM_PCIE_INT_EVT_INTD,
314 MSM_PCIE_INT_EVT_INTC,
315 MSM_PCIE_INT_EVT_INTB,
316 MSM_PCIE_INT_EVT_INTA,
317 MSM_PCIE_INT_EVT_EDMA,
318 MSM_PCIE_INT_EVT_MSI_0,
319 MSM_PCIE_INT_EVT_MSI_1,
320 MSM_PCIE_INT_EVT_MSI_2,
321 MSM_PCIE_INT_EVT_MSI_3,
322 MSM_PCIE_INT_EVT_MSI_4,
323 MSM_PCIE_INT_EVT_MSI_5,
324 MSM_PCIE_INT_EVT_MSI_6,
325 MSM_PCIE_INT_EVT_MSI_7,
326 MSM_PCIE_INT_EVT_MAX = 30,
327};
328
329enum msm_pcie_gpio {
330 MSM_PCIE_GPIO_PERST,
331 MSM_PCIE_GPIO_WAKE,
332 MSM_PCIE_GPIO_EP,
333 MSM_PCIE_MAX_GPIO
334};
335
336enum msm_pcie_link_status {
337 MSM_PCIE_LINK_DEINIT,
338 MSM_PCIE_LINK_ENABLED,
339 MSM_PCIE_LINK_DISABLED
340};
341
Tony Truong9f2c7722017-02-28 15:02:27 -0800342enum msm_pcie_boot_option {
343 MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0),
344 MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)
345};
346
Tony Truong349ee492014-10-01 17:35:56 -0700347/* gpio info structure */
348struct msm_pcie_gpio_info_t {
349 char *name;
350 uint32_t num;
351 bool out;
352 uint32_t on;
353 uint32_t init;
354 bool required;
355};
356
357/* voltage regulator info structrue */
358struct msm_pcie_vreg_info_t {
359 struct regulator *hdl;
360 char *name;
361 uint32_t max_v;
362 uint32_t min_v;
363 uint32_t opt_mode;
364 bool required;
365};
366
367/* reset info structure */
368struct msm_pcie_reset_info_t {
369 struct reset_control *hdl;
370 char *name;
371 bool required;
372};
373
374/* clock info structure */
375struct msm_pcie_clk_info_t {
376 struct clk *hdl;
377 char *name;
378 u32 freq;
379 bool config_mem;
380 bool required;
381};
382
383/* resource info structure */
384struct msm_pcie_res_info_t {
385 char *name;
386 struct resource *resource;
387 void __iomem *base;
388};
389
390/* irq info structrue */
391struct msm_pcie_irq_info_t {
392 char *name;
393 uint32_t num;
394};
395
396/* phy info structure */
397struct msm_pcie_phy_info_t {
398 u32 offset;
399 u32 val;
400 u32 delay;
401};
402
403/* PCIe device info structure */
404struct msm_pcie_device_info {
405 u32 bdf;
406 struct pci_dev *dev;
407 short short_bdf;
408 u32 sid;
409 int domain;
410 void __iomem *conf_base;
411 unsigned long phy_address;
412 u32 dev_ctrlstts_offset;
413 struct msm_pcie_register_event *event_reg;
414 bool registered;
415};
416
417/* msm pcie device structure */
418struct msm_pcie_dev_t {
419 struct platform_device *pdev;
420 struct pci_dev *dev;
421 struct regulator *gdsc;
422 struct regulator *gdsc_smmu;
423 struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
424 struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
425 struct msm_pcie_clk_info_t clk[MSM_PCIE_MAX_CLK];
426 struct msm_pcie_clk_info_t pipeclk[MSM_PCIE_MAX_PIPE_CLK];
427 struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
428 struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
429 struct msm_pcie_irq_info_t msi[MSM_PCIE_MAX_MSI];
430 struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
431 struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];
432
433 void __iomem *parf;
434 void __iomem *phy;
435 void __iomem *elbi;
436 void __iomem *dm_core;
437 void __iomem *conf;
438 void __iomem *bars;
439 void __iomem *tcsr;
440
441 uint32_t axi_bar_start;
442 uint32_t axi_bar_end;
443
444 struct resource *dev_mem_res;
445 struct resource *dev_io_res;
446
447 uint32_t wake_n;
448 uint32_t vreg_n;
449 uint32_t gpio_n;
450 uint32_t parf_deemph;
451 uint32_t parf_swing;
452
453 bool cfg_access;
454 spinlock_t cfg_lock;
455 unsigned long irqsave_flags;
456 struct mutex enumerate_lock;
457 struct mutex setup_lock;
458
459 struct irq_domain *irq_domain;
460 DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
461 uint32_t msi_gicm_addr;
462 uint32_t msi_gicm_base;
463 bool use_msi;
464
465 enum msm_pcie_link_status link_status;
466 bool user_suspend;
467 bool disable_pc;
468 struct pci_saved_state *saved_state;
469
470 struct wakeup_source ws;
471 struct msm_bus_scale_pdata *bus_scale_table;
472 uint32_t bus_client;
473
474 bool l0s_supported;
475 bool l1_supported;
476 bool l1ss_supported;
477 bool common_clk_en;
478 bool clk_power_manage_en;
479 bool aux_clk_sync;
480 bool aer_enable;
481 bool smmu_exist;
482 uint32_t smmu_sid_base;
483 uint32_t n_fts;
484 bool ext_ref_clk;
485 bool common_phy;
486 uint32_t ep_latency;
487 uint32_t wr_halt_size;
488 uint32_t cpl_timeout;
489 uint32_t current_bdf;
490 short current_short_bdf;
491 uint32_t perst_delay_us_min;
492 uint32_t perst_delay_us_max;
493 uint32_t tlp_rd_size;
494 bool linkdown_panic;
Tony Truong9f2c7722017-02-28 15:02:27 -0800495 uint32_t boot_option;
Tony Truong349ee492014-10-01 17:35:56 -0700496
497 uint32_t rc_idx;
498 uint32_t phy_ver;
499 bool drv_ready;
500 bool enumerated;
501 struct work_struct handle_wake_work;
502 struct mutex recovery_lock;
503 spinlock_t linkdown_lock;
504 spinlock_t wakeup_lock;
505 spinlock_t global_irq_lock;
506 spinlock_t aer_lock;
507 ulong linkdown_counter;
508 ulong link_turned_on_counter;
509 ulong link_turned_off_counter;
510 ulong rc_corr_counter;
511 ulong rc_non_fatal_counter;
512 ulong rc_fatal_counter;
513 ulong ep_corr_counter;
514 ulong ep_non_fatal_counter;
515 ulong ep_fatal_counter;
516 bool suspending;
517 ulong wake_counter;
518 u32 num_active_ep;
519 u32 num_ep;
520 bool pending_ep_reg;
521 u32 phy_len;
522 u32 port_phy_len;
523 struct msm_pcie_phy_info_t *phy_sequence;
524 struct msm_pcie_phy_info_t *port_phy_sequence;
525 u32 ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
526 u32 rc_shadow[PCIE_CONF_SPACE_DW];
527 bool shadow_en;
528 bool bridge_found;
529 struct msm_pcie_register_event *event_reg;
530 unsigned int scm_dev_id;
531 bool power_on;
532 void *ipc_log;
533 void *ipc_log_long;
534 void *ipc_log_dump;
535 bool use_19p2mhz_aux_clk;
536 bool use_pinctrl;
537 struct pinctrl *pinctrl;
538 struct pinctrl_state *pins_default;
539 struct pinctrl_state *pins_sleep;
540 struct msm_pcie_device_info pcidev_table[MAX_DEVICE_NUM];
541};
542
543
544/* debug mask sys interface */
545static int msm_pcie_debug_mask;
546module_param_named(debug_mask, msm_pcie_debug_mask,
547 int, 0644);
548
549/* debugfs values */
550static u32 rc_sel;
551static u32 base_sel;
552static u32 wr_offset;
553static u32 wr_mask;
554static u32 wr_value;
555static ulong corr_counter_limit = 5;
556
557/* counter to keep track if common PHY needs to be configured */
558static u32 num_rc_on;
559
560/* global lock for PCIe common PHY */
561static struct mutex com_phy_lock;
562
563/* Table to track info of PCIe devices */
564static struct msm_pcie_device_info
565 msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];
566
567/* PCIe driver state */
568struct pcie_drv_sta {
569 u32 rc_num;
570 struct mutex drv_lock;
571} pcie_drv;
572
573/* msm pcie device data */
574static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
575
576/* regulators */
577static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
578 {NULL, "vreg-3.3", 0, 0, 0, false},
579 {NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
580 {NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
581 {NULL, "vreg-cx", 0, 0, 0, false}
582};
583
584/* GPIOs */
585static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
586 {"perst-gpio", 0, 1, 0, 0, 1},
587 {"wake-gpio", 0, 0, 0, 0, 0},
588 {"qcom,ep-gpio", 0, 1, 1, 0, 0}
589};
590
591/* resets */
592static struct msm_pcie_reset_info_t
593msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
594 {
Tony Truongb213ac12017-04-05 15:21:20 -0700595 {NULL, "pcie_0_core_reset", false},
Tony Truong349ee492014-10-01 17:35:56 -0700596 {NULL, "pcie_phy_reset", false},
597 {NULL, "pcie_phy_com_reset", false},
598 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
599 {NULL, "pcie_0_phy_reset", false}
600 },
601 {
Tony Truongb213ac12017-04-05 15:21:20 -0700602 {NULL, "pcie_1_core_reset", false},
Tony Truong349ee492014-10-01 17:35:56 -0700603 {NULL, "pcie_phy_reset", false},
604 {NULL, "pcie_phy_com_reset", false},
605 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
606 {NULL, "pcie_1_phy_reset", false}
607 },
608 {
Tony Truongb213ac12017-04-05 15:21:20 -0700609 {NULL, "pcie_2_core_reset", false},
Tony Truong349ee492014-10-01 17:35:56 -0700610 {NULL, "pcie_phy_reset", false},
611 {NULL, "pcie_phy_com_reset", false},
612 {NULL, "pcie_phy_nocsr_com_phy_reset", false},
613 {NULL, "pcie_2_phy_reset", false}
614 }
615};
616
617/* pipe reset */
618static struct msm_pcie_reset_info_t
619msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
620 {
621 {NULL, "pcie_0_phy_pipe_reset", false}
622 },
623 {
624 {NULL, "pcie_1_phy_pipe_reset", false}
625 },
626 {
627 {NULL, "pcie_2_phy_pipe_reset", false}
628 }
629};
630
631/* clocks */
632static struct msm_pcie_clk_info_t
633 msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
634 {
635 {NULL, "pcie_0_ref_clk_src", 0, false, false},
636 {NULL, "pcie_0_aux_clk", 1010000, false, true},
637 {NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
638 {NULL, "pcie_0_mstr_axi_clk", 0, true, true},
639 {NULL, "pcie_0_slv_axi_clk", 0, true, true},
640 {NULL, "pcie_0_ldo", 0, false, true},
641 {NULL, "pcie_0_smmu_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700642 {NULL, "pcie_0_slv_q2a_axi_clk", 0, false, false},
643 {NULL, "pcie_phy_refgen_clk", 0, false, false},
644 {NULL, "pcie_tbu_clk", 0, false, false},
Tony Truong349ee492014-10-01 17:35:56 -0700645 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
646 {NULL, "pcie_phy_aux_clk", 0, false, false}
647 },
648 {
649 {NULL, "pcie_1_ref_clk_src", 0, false, false},
650 {NULL, "pcie_1_aux_clk", 1010000, false, true},
651 {NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
652 {NULL, "pcie_1_mstr_axi_clk", 0, true, true},
653 {NULL, "pcie_1_slv_axi_clk", 0, true, true},
654 {NULL, "pcie_1_ldo", 0, false, true},
655 {NULL, "pcie_1_smmu_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700656 {NULL, "pcie_1_slv_q2a_axi_clk", 0, false, false},
657 {NULL, "pcie_phy_refgen_clk", 0, false, false},
658 {NULL, "pcie_tbu_clk", 0, false, false},
Tony Truong349ee492014-10-01 17:35:56 -0700659 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
660 {NULL, "pcie_phy_aux_clk", 0, false, false}
661 },
662 {
663 {NULL, "pcie_2_ref_clk_src", 0, false, false},
664 {NULL, "pcie_2_aux_clk", 1010000, false, true},
665 {NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
666 {NULL, "pcie_2_mstr_axi_clk", 0, true, true},
667 {NULL, "pcie_2_slv_axi_clk", 0, true, true},
668 {NULL, "pcie_2_ldo", 0, false, true},
669 {NULL, "pcie_2_smmu_clk", 0, false, false},
Tony Truongb213ac12017-04-05 15:21:20 -0700670 {NULL, "pcie_2_slv_q2a_axi_clk", 0, false, false},
671 {NULL, "pcie_phy_refgen_clk", 0, false, false},
672 {NULL, "pcie_tbu_clk", 0, false, false},
Tony Truong349ee492014-10-01 17:35:56 -0700673 {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
674 {NULL, "pcie_phy_aux_clk", 0, false, false}
675 }
676};
677
678/* Pipe Clocks */
679static struct msm_pcie_clk_info_t
680 msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
681 {
682 {NULL, "pcie_0_pipe_clk", 125000000, true, true},
683 },
684 {
685 {NULL, "pcie_1_pipe_clk", 125000000, true, true},
686 },
687 {
688 {NULL, "pcie_2_pipe_clk", 125000000, true, true},
689 }
690};
691
692/* resources */
693static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
694 {"parf", 0, 0},
695 {"phy", 0, 0},
696 {"dm_core", 0, 0},
697 {"elbi", 0, 0},
698 {"conf", 0, 0},
699 {"io", 0, 0},
700 {"bars", 0, 0},
701 {"tcsr", 0, 0}
702};
703
704/* irqs */
705static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
706 {"int_msi", 0},
707 {"int_a", 0},
708 {"int_b", 0},
709 {"int_c", 0},
710 {"int_d", 0},
711 {"int_pls_pme", 0},
712 {"int_pme_legacy", 0},
713 {"int_pls_err", 0},
714 {"int_aer_legacy", 0},
715 {"int_pls_link_up", 0},
716 {"int_pls_link_down", 0},
717 {"int_bridge_flush_n", 0},
718 {"int_global_int", 0}
719};
720
721/* MSIs */
722static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
723 {"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
724 {"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
725 {"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
726 {"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
727 {"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
728 {"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
729 {"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
730 {"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
731};
732
Tony Truong7772e692017-04-13 17:03:34 -0700733static int msm_pcie_config_device(struct pci_dev *dev, void *pdev);
734
Tony Truong349ee492014-10-01 17:35:56 -0700735#ifdef CONFIG_ARM
736#define PCIE_BUS_PRIV_DATA(bus) \
737 (((struct pci_sys_data *)bus->sysdata)->private_data)
738
739static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];
740
741static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
742{
743 msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
744 msm_pcie_sys_data[dev->rc_idx].private_data = dev;
745
746 return &msm_pcie_sys_data[dev->rc_idx];
747}
748
/*
 * ARM: map legacy INTx interrupts for devices on this RC using the
 * standard swizzle and the device-tree IRQ parser.  @dev is unused; it
 * keeps the signature identical to the !CONFIG_ARM stub below.
 */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
}
753#else
754#define PCIE_BUS_PRIV_DATA(bus) \
755 (struct msm_pcie_dev_t *)(bus->sysdata)
756
/* Non-ARM: the msm_pcie_dev_t itself is used directly as bus sysdata. */
static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
{
	return dev;
}
761
/* Non-ARM: no INTx fixup needed; empty stub keeps callers unconditional. */
static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
{
}
765#endif
766
767static inline void msm_pcie_write_reg(void *base, u32 offset, u32 value)
768{
769 writel_relaxed(value, base + offset);
770 /* ensure that changes propagated to the hardware */
771 wmb();
772}
773
774static inline void msm_pcie_write_reg_field(void *base, u32 offset,
775 const u32 mask, u32 val)
776{
777 u32 shift = find_first_bit((void *)&mask, 32);
778 u32 tmp = readl_relaxed(base + offset);
779
780 tmp &= ~mask; /* clear written bits */
781 val = tmp | (val << shift);
782 writel_relaxed(val, base + offset);
783 /* ensure that changes propagated to the hardware */
784 wmb();
785}
786
787static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
788 struct msm_pcie_clk_info_t *info)
789{
790 int ret;
791
792 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
793 if (ret)
794 PCIE_ERR(dev,
795 "PCIe: RC%d can't configure core memory for clk %s: %d.\n",
796 dev->rc_idx, info->name, ret);
797 else
798 PCIE_DBG2(dev,
799 "PCIe: RC%d configured core memory for clk %s.\n",
800 dev->rc_idx, info->name);
801
802 ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
803 if (ret)
804 PCIE_ERR(dev,
805 "PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
806 dev->rc_idx, info->name, ret);
807 else
808 PCIE_DBG2(dev,
809 "PCIe: RC%d configured peripheral memory for clk %s.\n",
810 dev->rc_idx, info->name);
811}
812
Tony Truong349ee492014-10-01 17:35:56 -0700813static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
814{
815 int i, size;
Tony Truong349ee492014-10-01 17:35:56 -0700816
817 size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
818 for (i = 0; i < size; i += 32) {
819 PCIE_DUMP(dev,
820 "PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
821 dev->rc_idx, i,
822 readl_relaxed(dev->phy + i),
823 readl_relaxed(dev->phy + (i + 4)),
824 readl_relaxed(dev->phy + (i + 8)),
825 readl_relaxed(dev->phy + (i + 12)),
826 readl_relaxed(dev->phy + (i + 16)),
827 readl_relaxed(dev->phy + (i + 20)),
828 readl_relaxed(dev->phy + (i + 24)),
829 readl_relaxed(dev->phy + (i + 28)));
830 }
831}
832
Tony Truong349ee492014-10-01 17:35:56 -0700833static void pcie_phy_init(struct msm_pcie_dev_t *dev)
834{
835 int i;
836 struct msm_pcie_phy_info_t *phy_seq;
837
838 PCIE_DBG(dev,
839 "RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
840 dev->rc_idx);
841
842 if (dev->phy_sequence) {
843 i = dev->phy_len;
844 phy_seq = dev->phy_sequence;
845 while (i--) {
846 msm_pcie_write_reg(dev->phy,
847 phy_seq->offset,
848 phy_seq->val);
849 if (phy_seq->delay)
850 usleep_range(phy_seq->delay,
851 phy_seq->delay + 1);
852 phy_seq++;
853 }
Tony Truong349ee492014-10-01 17:35:56 -0700854 }
855}
856
857static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
858{
859 int i;
860 struct msm_pcie_phy_info_t *phy_seq;
Tony Truong349ee492014-10-01 17:35:56 -0700861
862 PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);
863
Tony Truong349ee492014-10-01 17:35:56 -0700864 if (dev->port_phy_sequence) {
865 i = dev->port_phy_len;
866 phy_seq = dev->port_phy_sequence;
867 while (i--) {
868 msm_pcie_write_reg(dev->phy,
869 phy_seq->offset,
870 phy_seq->val);
871 if (phy_seq->delay)
872 usleep_range(phy_seq->delay,
873 phy_seq->delay + 1);
874 phy_seq++;
875 }
Tony Truong349ee492014-10-01 17:35:56 -0700876 }
877
Tony Truong349ee492014-10-01 17:35:56 -0700878}
879
880static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
881{
882 if (dev->phy_ver >= 0x20) {
883 if (readl_relaxed(dev->phy +
884 PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) &
885 BIT(6))
886 return false;
887 else
888 return true;
889 }
890
891 if (!(readl_relaxed(dev->phy + PCIE_COM_PCS_READY_STATUS) & 0x1))
892 return false;
893 else
894 return true;
895}
Tony Truong349ee492014-10-01 17:35:56 -0700896
897static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
898{
899 int ret, scm_ret;
900
901 if (!dev) {
902 pr_err("PCIe: the input pcie dev is NULL.\n");
903 return -ENODEV;
904 }
905
906 ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
907 if (ret || scm_ret) {
908 PCIE_ERR(dev,
909 "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
910 dev->rc_idx, ret, scm_ret);
911 return ret ? ret : -EINVAL;
912 }
913
914 return 0;
915}
916
917static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
918 u32 offset)
919{
920 if (offset % 4) {
921 PCIE_ERR(dev,
922 "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
923 dev->rc_idx, offset);
924 return MSM_PCIE_ERROR;
925 }
926
927 return 0;
928}
929
/*
 * Confirm that the PCIe link is genuinely up before touching config
 * space, in escalating order of cost:
 *  1. optionally the driver's own software link state,
 *  2. the link-up bit in the RC's debug register (dm_core + 0x80, BIT(29)),
 *  3. the RC's vendor/device ID read (all-ones means the link dropped),
 *  4. optionally the same ID read on the endpoint at @ep_conf.
 *
 * Returns true only if every enabled check passes.  (The "vender"
 * spelling in the log strings is historical and left untouched.)
 */
static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
				bool check_sw_stts,
				bool check_ep,
				void __iomem *ep_conf)
{
	u32 val;

	if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
			dev->rc_idx);
		return false;
	}

	if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
			dev->rc_idx);
		return false;
	}

	/* offset 0 of config space: device ID / vendor ID dword */
	val = readl_relaxed(dev->dm_core);
	PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
		dev->rc_idx, val);
	/* a dead link reads back as all ones */
	if (val == PCIE_LINK_DOWN) {
		PCIE_ERR(dev,
			"PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
			dev->rc_idx, dev->rc_idx, val);
		return false;
	}

	if (check_ep) {
		val = readl_relaxed(ep_conf);
		PCIE_DBG(dev,
			"PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
			dev->rc_idx, val);
		if (val == PCIE_LINK_DOWN) {
			PCIE_ERR(dev,
				"PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
				dev->rc_idx, dev->rc_idx, val);
			return false;
		}
	}

	return true;
}
974
/*
 * Restore shadowed config space after a link recovery.
 *
 * @rc:  true  - restore only the root complex from rc_shadow
 *       false - restore every tracked endpoint from ep_shadow[],
 *               walking pcidev_table[] until the first empty (bdf == 0)
 *               slot and skipping endpoints whose link is not confirmed
 *
 * Shadow entries holding PCIE_CLEAR are unwritten and skipped.  Dwords
 * are restored from the highest config offset down to offset 0.  For
 * endpoints, the restored state is re-captured with pci_save_state()
 * and the config pointer advances by SZ_4K to the next function's
 * config window.
 */
static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
{
	int i, j;
	u32 val = 0;
	u32 *shadow;
	void *cfg = dev->conf;

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		/* endpoint table is packed; first empty bdf ends the scan */
		if (!rc && !dev->pcidev_table[i].bdf)
			break;
		if (rc) {
			cfg = dev->dm_core;
			shadow = dev->rc_shadow;
		} else {
			/* don't write config space over a dead link */
			if (!msm_pcie_confirm_linkup(dev, false, true,
				dev->pcidev_table[i].conf_base))
				continue;

			shadow = dev->ep_shadow[i];
			PCIE_DBG(dev,
				"PCIe Device: %02x:%02x.%01x\n",
				dev->pcidev_table[i].bdf >> 24,
				dev->pcidev_table[i].bdf >> 19 & 0x1f,
				dev->pcidev_table[i].bdf >> 16 & 0x07);
		}
		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
			val = shadow[j];
			/* PCIE_CLEAR marks dwords that were never shadowed */
			if (val != PCIE_CLEAR) {
				PCIE_DBG3(dev,
					"PCIe: before recovery:cfg 0x%x:0x%x\n",
					j * 4, readl_relaxed(cfg + j * 4));
				PCIE_DBG3(dev,
					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
					j, j * 4, val);
				writel_relaxed(val, cfg + j * 4);
				/* ensure changes propagated to the hardware */
				wmb();
				PCIE_DBG3(dev,
					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
					j * 4, readl_relaxed(cfg + j * 4));
			}
		}
		/* RC mode restores exactly one device */
		if (rc)
			break;

		pci_save_state(dev->pcidev_table[i].dev);
		cfg += SZ_4K;
	}
}
1024
1025static void msm_pcie_write_mask(void __iomem *addr,
1026 uint32_t clear_mask, uint32_t set_mask)
1027{
1028 uint32_t val;
1029
1030 val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
1031 writel_relaxed(val, addr);
1032 wmb(); /* ensure data is written to hardware register */
1033}
1034
/*
 * Dump PARF diagnostics to the IPC dump log in two passes:
 *  1. sweep the test-bus mux select field (bits 23:16 of PARF_SYS_CTRL)
 *     through 0x01..0x1A and record PARF_TEST_BUS for each selection,
 *     restoring the original PARF_SYS_CTRL value afterwards;
 *  2. dump the whole PARF register space, eight dwords per line.
 */
static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
{
	int i, size;
	u32 original;

	PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);

	original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
	for (i = 1; i <= 0x1A; i++) {
		/* select test-bus source i in bits 23:16 */
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
				0xFF0000, i << 16);
		PCIE_DUMP(dev,
			"RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
			readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
	}
	/* undo the test-bus selection */
	writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);

	PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);

	size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
	for (i = 0; i < size; i += 32) {
		PCIE_DUMP(dev,
			"RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->parf + i),
			readl_relaxed(dev->parf + (i + 4)),
			readl_relaxed(dev->parf + (i + 8)),
			readl_relaxed(dev->parf + (i + 12)),
			readl_relaxed(dev->parf + (i + 16)),
			readl_relaxed(dev->parf + (i + 20)),
			readl_relaxed(dev->parf + (i + 24)),
			readl_relaxed(dev->parf + (i + 28)));
	}
}
1071
1072static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
1073{
1074 PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
1075 dev->rc_idx, dev->enumerated ? "" : "not");
1076 PCIE_DBG_FS(dev, "PCIe: link is %s\n",
1077 (dev->link_status == MSM_PCIE_LINK_ENABLED)
1078 ? "enabled" : "disabled");
1079 PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
1080 dev->cfg_access ? "" : "not");
1081 PCIE_DBG_FS(dev, "use_msi is %d\n",
1082 dev->use_msi);
1083 PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
1084 dev->use_pinctrl);
1085 PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
1086 dev->use_19p2mhz_aux_clk);
1087 PCIE_DBG_FS(dev, "user_suspend is %d\n",
1088 dev->user_suspend);
1089 PCIE_DBG_FS(dev, "num_ep: %d\n",
1090 dev->num_ep);
1091 PCIE_DBG_FS(dev, "num_active_ep: %d\n",
1092 dev->num_active_ep);
1093 PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
1094 dev->pending_ep_reg ? "true" : "false");
1095 PCIE_DBG_FS(dev, "phy_len is %d",
1096 dev->phy_len);
1097 PCIE_DBG_FS(dev, "port_phy_len is %d",
1098 dev->port_phy_len);
1099 PCIE_DBG_FS(dev, "disable_pc is %d",
1100 dev->disable_pc);
1101 PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
1102 dev->l0s_supported ? "" : "not");
1103 PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
1104 dev->l1_supported ? "" : "not");
1105 PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
1106 dev->l1ss_supported ? "" : "not");
1107 PCIE_DBG_FS(dev, "common_clk_en is %d\n",
1108 dev->common_clk_en);
1109 PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
1110 dev->clk_power_manage_en);
1111 PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
1112 dev->aux_clk_sync);
1113 PCIE_DBG_FS(dev, "AER is %s enable\n",
1114 dev->aer_enable ? "" : "not");
1115 PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
1116 dev->ext_ref_clk);
Tony Truong9f2c7722017-02-28 15:02:27 -08001117 PCIE_DBG_FS(dev, "boot_option is 0x%x\n",
1118 dev->boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07001119 PCIE_DBG_FS(dev, "phy_ver is %d\n",
1120 dev->phy_ver);
1121 PCIE_DBG_FS(dev, "drv_ready is %d\n",
1122 dev->drv_ready);
1123 PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
1124 dev->linkdown_panic);
1125 PCIE_DBG_FS(dev, "the link is %s suspending\n",
1126 dev->suspending ? "" : "not");
1127 PCIE_DBG_FS(dev, "shadow is %s enabled\n",
1128 dev->shadow_en ? "" : "not");
1129 PCIE_DBG_FS(dev, "the power of RC is %s on\n",
1130 dev->power_on ? "" : "not");
1131 PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
1132 dev->msi_gicm_addr);
1133 PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
1134 dev->msi_gicm_base);
1135 PCIE_DBG_FS(dev, "bus_client: %d\n",
1136 dev->bus_client);
1137 PCIE_DBG_FS(dev, "current short bdf: %d\n",
1138 dev->current_short_bdf);
1139 PCIE_DBG_FS(dev, "smmu does %s exist\n",
1140 dev->smmu_exist ? "" : "not");
1141 PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
1142 dev->smmu_sid_base);
1143 PCIE_DBG_FS(dev, "n_fts: %d\n",
1144 dev->n_fts);
1145 PCIE_DBG_FS(dev, "common_phy: %d\n",
1146 dev->common_phy);
1147 PCIE_DBG_FS(dev, "ep_latency: %dms\n",
1148 dev->ep_latency);
1149 PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
1150 dev->wr_halt_size);
1151 PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
1152 dev->cpl_timeout);
1153 PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
1154 dev->current_bdf);
1155 PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
1156 dev->perst_delay_us_min);
1157 PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
1158 dev->perst_delay_us_max);
1159 PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
1160 dev->tlp_rd_size);
1161 PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
1162 dev->rc_corr_counter);
1163 PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
1164 dev->rc_non_fatal_counter);
1165 PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
1166 dev->rc_fatal_counter);
1167 PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
1168 dev->ep_corr_counter);
1169 PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
1170 dev->ep_non_fatal_counter);
1171 PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
1172 dev->ep_fatal_counter);
1173 PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
1174 dev->linkdown_counter);
1175 PCIE_DBG_FS(dev, "wake_counter: %lu\n",
1176 dev->wake_counter);
1177 PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
1178 dev->link_turned_on_counter);
1179 PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
1180 dev->link_turned_off_counter);
1181}
1182
1183static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
1184{
1185 int i, j;
1186 u32 val = 0;
1187 u32 *shadow;
1188
1189 for (i = 0; i < MAX_DEVICE_NUM; i++) {
1190 if (!rc && !dev->pcidev_table[i].bdf)
1191 break;
1192 if (rc) {
1193 shadow = dev->rc_shadow;
1194 } else {
1195 shadow = dev->ep_shadow[i];
1196 PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
1197 dev->pcidev_table[i].bdf >> 24,
1198 dev->pcidev_table[i].bdf >> 19 & 0x1f,
1199 dev->pcidev_table[i].bdf >> 16 & 0x07);
1200 }
1201 for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
1202 val = shadow[j];
1203 if (val != PCIE_CLEAR) {
1204 PCIE_DBG_FS(dev,
1205 "PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
1206 j, j * 4, val);
1207 }
1208 }
1209 if (rc)
1210 break;
1211 }
1212}
1213
/*
 * msm_pcie_sel_debug_testcase() - execute one debug-fs test case against a
 * root complex.
 * @dev:      PCIe root-complex context
 * @testcase: test number written via debugfs (see the switch below)
 *
 * Test cases:
 *   0  print controller status          7  disable L1
 *   1  disable the link                 8  enable L1
 *   2  enable link + recover cfg space  9  disable L1 substates
 *   3  bounce the link + recover       10  enable L1 substates
 *   4  dump RC/EP shadow registers     11  enumerate the RC
 *   5  disable L0s                     12  write wr_value to wr_offset
 *   6  enable L0s                      13  dump all registers of base_sel
 *
 * Cases 5-10 first walk the endpoint's standard capability list to locate
 * the PCIe capability, from which the EP's Link Control/Status (+0x10) and
 * Device Control 2/Status 2 (+0x28) offsets are derived.
 */
static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
					u32 testcase)
{
	int ret, i;
	u32 base_sel_size = 0;
	u32 val = 0;
	u32 current_offset = 0;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	if (testcase >= 5 && testcase <= 10) {
		/* Walk the EP's capability list for the PCIe capability. */
		current_offset =
			readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xff) == PCIE20_CAP_ID) {
				ep_link_ctrlstts_offset = current_offset +
								0x10;
				ep_dev_ctrl2stts2_offset = current_offset +
								0x28;
				break;
			}
			current_offset = (val >> 8) & 0xff;
		}

		/*
		 * NOTE(review): failure only logs; cases 5-10 still run with
		 * offset 0 and would touch config offset 0x0 — confirm this
		 * is intended before relying on these test cases.
		 */
		if (!ep_link_ctrlstts_offset)
			PCIE_DBG(dev,
				"RC%d endpoint does not support PCIe capability registers\n",
				dev->rc_idx);
		else
			PCIE_DBG(dev,
				"RC%d: ep_link_ctrlstts_offset: 0x%x\n",
				dev->rc_idx, ep_link_ctrlstts_offset);
	}

	switch (testcase) {
	case 0: /* output status */
		PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
			dev->rc_idx);
		msm_pcie_show_status(dev);
		break;
	case 1: /* disable link */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
				__func__);
		break;
	case 2: /* enable link and recover config space for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: enable link and recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 3: /*
		 * disable and enable link, recover config space for
		 * RC and EP
		 */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
			dev->rc_idx);
		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
				__func__);
		else
			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
			dev->dev, NULL,
			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
		if (ret)
			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
				__func__);
		else {
			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
			msm_pcie_recover_config(dev->dev);
		}
		break;
	case 4: /* dump shadow registers for RC and EP */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping RC shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, true);

		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: dumping EP shadow registers\n",
			dev->rc_idx);
		msm_pcie_shadow_dump(dev, false);
		break;
	case 5: /* disable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
			dev->rc_idx);
		/* Clear ASPM L0s enable (bit 0) on both RC and EP. */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(0), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(0), 0);
		/* Keep the shadow copies in sync for later recovery. */
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 6: /* enable L0s */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
			dev->rc_idx);
		/* Set ASPM L0s enable (bit 0) on both RC and EP. */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(0));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 7: /* disable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
			dev->rc_idx);
		/* Clear ASPM L1 enable (bit 1) on both RC and EP. */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				BIT(1), 0);
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				BIT(1), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 8: /* enable L1 */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
			dev->rc_idx);
		/* Set ASPM L1 enable (bit 1) on both RC and EP. */
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS,
				0, BIT(1));
		msm_pcie_write_mask(dev->conf +
				ep_link_ctrlstts_offset,
				0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf +
				ep_link_ctrlstts_offset));
		break;
	case 9: /* disable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
			dev->rc_idx);
		/* Find the L1 PM Substates extended capability on the EP. */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* Clear the four L1ss enable bits and LTR L1.2 (bit 10). */
		msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_L1SUB_CONTROL1,
			0xf, 0);
		msm_pcie_write_mask(dev->dm_core +
			PCIE20_DEVICE_CONTROL2_STATUS2,
			BIT(10), 0);
		msm_pcie_write_reg_field(dev->conf,
			ep_l1sub_ctrl1_offset,
			0xf, 0);
		msm_pcie_write_mask(dev->conf +
			ep_dev_ctrl2stts2_offset,
			BIT(10), 0);
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 10: /* enable L1ss */
		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
			dev->rc_idx);
		/* Find the L1 PM Substates extended capability on the EP. */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset =
						current_offset + 0x4;
				ep_l1sub_ctrl1_offset =
						current_offset + 0x8;
				break;
			}
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG_FS(dev,
				"PCIe: RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			break;
		}

		val = readl_relaxed(dev->conf +
				ep_l1sub_cap_reg1_offset);

		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
			val);
		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* Enable only the substates the EP advertises support for. */
		val &= 0xf;

		msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_L1SUB_CONTROL1,
			0xf, val);
		msm_pcie_write_mask(dev->dm_core +
			PCIE20_DEVICE_CONTROL2_STATUS2,
			0, BIT(10));
		msm_pcie_write_reg_field(dev->conf,
			ep_l1sub_ctrl1_offset,
			0xf, val);
		msm_pcie_write_mask(dev->conf +
			ep_dev_ctrl2stts2_offset,
			0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_L1SUB_CONTROL1));
		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf +
				ep_l1sub_ctrl1_offset));
		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
		break;
	case 11: /* enumerate PCIe */
		PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
			dev->rc_idx);
		if (dev->enumerated)
			PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
				dev->rc_idx);
		else {
			if (!msm_pcie_enumerate(dev->rc_idx))
				PCIE_DBG_FS(dev,
					"PCIe: RC%d is successfully enumerated\n",
					dev->rc_idx);
			else
				PCIE_DBG_FS(dev,
					"PCIe: RC%d enumeration failed\n",
					dev->rc_idx);
		}
		break;
	case 12: /* write a value to a register */
		PCIE_DBG_FS(dev,
			"\n\nPCIe: RC%d: writing a value to a register\n\n",
			dev->rc_idx);

		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		}

		PCIE_DBG_FS(dev,
			"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
			dev->res[base_sel - 1].name,
			dev->res[base_sel - 1].base,
			wr_offset, wr_mask, wr_value);

		base_sel_size = resource_size(dev->res[base_sel - 1].resource);

		/* Reject offsets past the region or with bad alignment. */
		if (wr_offset > base_sel_size - 4 ||
			msm_pcie_check_align(dev, wr_offset))
			PCIE_DBG_FS(dev,
				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
				dev->rc_idx, wr_offset, base_sel_size - 4);
		else
			msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
				wr_offset, wr_mask, wr_value);

		break;
	case 13: /* dump all registers of base_sel */
		if (!base_sel) {
			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
			pcie_parf_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
			pcie_phy_dump(dev);
			break;
		} else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
			/* Dump only the first 4K of config space. */
			base_sel_size = 0x1000;
		} else {
			base_sel_size = resource_size(
				dev->res[base_sel - 1].resource);
		}

		PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
			dev->res[base_sel - 1].name, dev->rc_idx);

		/* Raw dump, 32 bytes (eight dwords) per log line. */
		for (i = 0; i < base_sel_size; i += 32) {
			PCIE_DBG_FS(dev,
			"0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			i, readl_relaxed(dev->res[base_sel - 1].base + i),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
			readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
		}
		break;
	default:
		PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
		break;
	}
}
1635
1636int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
1637 u32 offset, u32 mask, u32 value)
1638{
1639 int ret = 0;
1640 struct msm_pcie_dev_t *pdev = NULL;
1641
1642 if (!dev) {
1643 pr_err("PCIe: the input pci dev is NULL.\n");
1644 return -ENODEV;
1645 }
1646
1647 if (option == 12 || option == 13) {
1648 if (!base || base > 5) {
1649 PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
1650 PCIE_DBG_FS(pdev,
1651 "PCIe: base_sel is still 0x%x\n", base_sel);
1652 return -EINVAL;
1653 }
1654
1655 base_sel = base;
1656 PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
1657
1658 if (option == 12) {
1659 wr_offset = offset;
1660 wr_mask = mask;
1661 wr_value = value;
1662
1663 PCIE_DBG_FS(pdev,
1664 "PCIe: wr_offset is now 0x%x\n", wr_offset);
1665 PCIE_DBG_FS(pdev,
1666 "PCIe: wr_mask is now 0x%x\n", wr_mask);
1667 PCIE_DBG_FS(pdev,
1668 "PCIe: wr_value is now 0x%x\n", wr_value);
1669 }
1670 }
1671
1672 pdev = PCIE_BUS_PRIV_DATA(dev->bus);
1673 rc_sel = 1 << pdev->rc_idx;
1674
1675 msm_pcie_sel_debug_testcase(pdev, option);
1676
1677 return ret;
1678}
1679EXPORT_SYMBOL(msm_pcie_debug_info);
1680
Tony Truongbd9a3412017-02-27 18:30:13 -08001681#ifdef CONFIG_SYSFS
1682static ssize_t msm_pcie_enumerate_store(struct device *dev,
1683 struct device_attribute *attr,
1684 const char *buf, size_t count)
1685{
1686 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
1687 dev_get_drvdata(dev);
1688
1689 if (pcie_dev)
1690 msm_pcie_enumerate(pcie_dev->rc_idx);
1691
1692 return count;
1693}
1694
1695static DEVICE_ATTR(enumerate, 0200, NULL, msm_pcie_enumerate_store);
1696
1697static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
1698{
1699 int ret;
1700
1701 ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
1702 if (ret)
1703 PCIE_DBG_FS(dev,
1704 "RC%d: failed to create sysfs enumerate node\n",
1705 dev->rc_idx);
1706}
1707
1708static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
1709{
1710 if (dev->pdev)
1711 device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
1712}
1713#else
/* No-op stubs for kernels built without CONFIG_SYSFS. */
static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
{
}

static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
{
}
1721#endif
1722
Tony Truong349ee492014-10-01 17:35:56 -07001723#ifdef CONFIG_DEBUG_FS
1724static struct dentry *dent_msm_pcie;
1725static struct dentry *dfile_rc_sel;
1726static struct dentry *dfile_case;
1727static struct dentry *dfile_base_sel;
1728static struct dentry *dfile_linkdown_panic;
1729static struct dentry *dfile_wr_offset;
1730static struct dentry *dfile_wr_mask;
1731static struct dentry *dfile_wr_value;
Tony Truong9f2c7722017-02-28 15:02:27 -08001732static struct dentry *dfile_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07001733static struct dentry *dfile_aer_enable;
1734static struct dentry *dfile_corr_counter_limit;
1735
1736static u32 rc_sel_max;
1737
1738static ssize_t msm_pcie_cmd_debug(struct file *file,
1739 const char __user *buf,
1740 size_t count, loff_t *ppos)
1741{
1742 unsigned long ret;
1743 char str[MAX_MSG_LEN];
1744 unsigned int testcase = 0;
1745 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08001746 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001747
Tony Truongfdbd5672017-01-06 16:23:14 -08001748 memset(str, 0, size);
1749 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001750 if (ret)
1751 return -EFAULT;
1752
Tony Truongfdbd5672017-01-06 16:23:14 -08001753 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001754 testcase = (testcase * 10) + (str[i] - '0');
1755
1756 if (!rc_sel)
1757 rc_sel = 1;
1758
1759 pr_alert("PCIe: TEST: %d\n", testcase);
1760
1761 for (i = 0; i < MAX_RC_NUM; i++) {
1762 if (!((rc_sel >> i) & 0x1))
1763 continue;
1764 msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
1765 }
1766
1767 return count;
1768}
1769
1770const struct file_operations msm_pcie_cmd_debug_ops = {
1771 .write = msm_pcie_cmd_debug,
1772};
1773
1774static ssize_t msm_pcie_set_rc_sel(struct file *file,
1775 const char __user *buf,
1776 size_t count, loff_t *ppos)
1777{
1778 unsigned long ret;
1779 char str[MAX_MSG_LEN];
1780 int i;
1781 u32 new_rc_sel = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08001782 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001783
Tony Truongfdbd5672017-01-06 16:23:14 -08001784 memset(str, 0, size);
1785 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001786 if (ret)
1787 return -EFAULT;
1788
Tony Truongfdbd5672017-01-06 16:23:14 -08001789 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001790 new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
1791
1792 if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
1793 pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
1794 pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
1795 } else {
1796 rc_sel = new_rc_sel;
1797 pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
1798 }
1799
1800 pr_alert("PCIe: the following RC(s) will be tested:\n");
1801 for (i = 0; i < MAX_RC_NUM; i++) {
1802 if (!rc_sel) {
1803 pr_alert("RC %d\n", i);
1804 break;
1805 } else if (rc_sel & (1 << i)) {
1806 pr_alert("RC %d\n", i);
1807 }
1808 }
1809
1810 return count;
1811}
1812
1813const struct file_operations msm_pcie_rc_sel_ops = {
1814 .write = msm_pcie_set_rc_sel,
1815};
1816
1817static ssize_t msm_pcie_set_base_sel(struct file *file,
1818 const char __user *buf,
1819 size_t count, loff_t *ppos)
1820{
1821 unsigned long ret;
1822 char str[MAX_MSG_LEN];
1823 int i;
1824 u32 new_base_sel = 0;
1825 char *base_sel_name;
Tony Truongfdbd5672017-01-06 16:23:14 -08001826 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001827
Tony Truongfdbd5672017-01-06 16:23:14 -08001828 memset(str, 0, size);
1829 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001830 if (ret)
1831 return -EFAULT;
1832
Tony Truongfdbd5672017-01-06 16:23:14 -08001833 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001834 new_base_sel = (new_base_sel * 10) + (str[i] - '0');
1835
1836 if (!new_base_sel || new_base_sel > 5) {
1837 pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
1838 new_base_sel);
1839 pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
1840 } else {
1841 base_sel = new_base_sel;
1842 pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
1843 }
1844
1845 switch (base_sel) {
1846 case 1:
1847 base_sel_name = "PARF";
1848 break;
1849 case 2:
1850 base_sel_name = "PHY";
1851 break;
1852 case 3:
1853 base_sel_name = "RC CONFIG SPACE";
1854 break;
1855 case 4:
1856 base_sel_name = "ELBI";
1857 break;
1858 case 5:
1859 base_sel_name = "EP CONFIG SPACE";
1860 break;
1861 default:
1862 base_sel_name = "INVALID";
1863 break;
1864 }
1865
1866 pr_alert("%s\n", base_sel_name);
1867
1868 return count;
1869}
1870
1871const struct file_operations msm_pcie_base_sel_ops = {
1872 .write = msm_pcie_set_base_sel,
1873};
1874
1875static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
1876 const char __user *buf,
1877 size_t count, loff_t *ppos)
1878{
1879 unsigned long ret;
1880 char str[MAX_MSG_LEN];
1881 u32 new_linkdown_panic = 0;
1882 int i;
1883
1884 memset(str, 0, sizeof(str));
1885 ret = copy_from_user(str, buf, sizeof(str));
1886 if (ret)
1887 return -EFAULT;
1888
1889 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
1890 new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
1891
1892 if (new_linkdown_panic <= 1) {
1893 for (i = 0; i < MAX_RC_NUM; i++) {
1894 if (!rc_sel) {
1895 msm_pcie_dev[0].linkdown_panic =
1896 new_linkdown_panic;
1897 PCIE_DBG_FS(&msm_pcie_dev[0],
1898 "PCIe: RC0: linkdown_panic is now %d\n",
1899 msm_pcie_dev[0].linkdown_panic);
1900 break;
1901 } else if (rc_sel & (1 << i)) {
1902 msm_pcie_dev[i].linkdown_panic =
1903 new_linkdown_panic;
1904 PCIE_DBG_FS(&msm_pcie_dev[i],
1905 "PCIe: RC%d: linkdown_panic is now %d\n",
1906 i, msm_pcie_dev[i].linkdown_panic);
1907 }
1908 }
1909 } else {
1910 pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
1911 new_linkdown_panic);
1912 }
1913
1914 return count;
1915}
1916
1917const struct file_operations msm_pcie_linkdown_panic_ops = {
1918 .write = msm_pcie_set_linkdown_panic,
1919};
1920
1921static ssize_t msm_pcie_set_wr_offset(struct file *file,
1922 const char __user *buf,
1923 size_t count, loff_t *ppos)
1924{
1925 unsigned long ret;
1926 char str[MAX_MSG_LEN];
1927 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08001928 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001929
Tony Truongfdbd5672017-01-06 16:23:14 -08001930 memset(str, 0, size);
1931 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001932 if (ret)
1933 return -EFAULT;
1934
1935 wr_offset = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08001936 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001937 wr_offset = (wr_offset * 10) + (str[i] - '0');
1938
1939 pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
1940
1941 return count;
1942}
1943
1944const struct file_operations msm_pcie_wr_offset_ops = {
1945 .write = msm_pcie_set_wr_offset,
1946};
1947
1948static ssize_t msm_pcie_set_wr_mask(struct file *file,
1949 const char __user *buf,
1950 size_t count, loff_t *ppos)
1951{
1952 unsigned long ret;
1953 char str[MAX_MSG_LEN];
1954 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08001955 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001956
Tony Truongfdbd5672017-01-06 16:23:14 -08001957 memset(str, 0, size);
1958 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001959 if (ret)
1960 return -EFAULT;
1961
1962 wr_mask = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08001963 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001964 wr_mask = (wr_mask * 10) + (str[i] - '0');
1965
1966 pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
1967
1968 return count;
1969}
1970
1971const struct file_operations msm_pcie_wr_mask_ops = {
1972 .write = msm_pcie_set_wr_mask,
1973};
1974static ssize_t msm_pcie_set_wr_value(struct file *file,
1975 const char __user *buf,
1976 size_t count, loff_t *ppos)
1977{
1978 unsigned long ret;
1979 char str[MAX_MSG_LEN];
1980 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08001981 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07001982
Tony Truongfdbd5672017-01-06 16:23:14 -08001983 memset(str, 0, size);
1984 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07001985 if (ret)
1986 return -EFAULT;
1987
1988 wr_value = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08001989 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07001990 wr_value = (wr_value * 10) + (str[i] - '0');
1991
1992 pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
1993
1994 return count;
1995}
1996
1997const struct file_operations msm_pcie_wr_value_ops = {
1998 .write = msm_pcie_set_wr_value,
1999};
2000
Tony Truong9f2c7722017-02-28 15:02:27 -08002001static ssize_t msm_pcie_set_boot_option(struct file *file,
Tony Truong349ee492014-10-01 17:35:56 -07002002 const char __user *buf,
2003 size_t count, loff_t *ppos)
2004{
2005 unsigned long ret;
2006 char str[MAX_MSG_LEN];
Tony Truong9f2c7722017-02-28 15:02:27 -08002007 u32 new_boot_option = 0;
Tony Truong349ee492014-10-01 17:35:56 -07002008 int i;
2009
2010 memset(str, 0, sizeof(str));
2011 ret = copy_from_user(str, buf, sizeof(str));
2012 if (ret)
2013 return -EFAULT;
2014
2015 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong9f2c7722017-02-28 15:02:27 -08002016 new_boot_option = (new_boot_option * 10) + (str[i] - '0');
Tony Truong349ee492014-10-01 17:35:56 -07002017
Tony Truong9f2c7722017-02-28 15:02:27 -08002018 if (new_boot_option <= 1) {
Tony Truong349ee492014-10-01 17:35:56 -07002019 for (i = 0; i < MAX_RC_NUM; i++) {
2020 if (!rc_sel) {
Tony Truong9f2c7722017-02-28 15:02:27 -08002021 msm_pcie_dev[0].boot_option = new_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002022 PCIE_DBG_FS(&msm_pcie_dev[0],
Tony Truong9f2c7722017-02-28 15:02:27 -08002023 "PCIe: RC0: boot_option is now 0x%x\n",
2024 msm_pcie_dev[0].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002025 break;
2026 } else if (rc_sel & (1 << i)) {
Tony Truong9f2c7722017-02-28 15:02:27 -08002027 msm_pcie_dev[i].boot_option = new_boot_option;
Tony Truong349ee492014-10-01 17:35:56 -07002028 PCIE_DBG_FS(&msm_pcie_dev[i],
Tony Truong9f2c7722017-02-28 15:02:27 -08002029 "PCIe: RC%d: boot_option is now 0x%x\n",
2030 i, msm_pcie_dev[i].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002031 }
2032 }
2033 } else {
Tony Truong9f2c7722017-02-28 15:02:27 -08002034 pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
2035 new_boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002036 }
2037
2038 return count;
2039}
2040
/* debugfs "boot_option" entry: write-only, parsed by msm_pcie_set_boot_option() */
const struct file_operations msm_pcie_boot_option_ops = {
	.write = msm_pcie_set_boot_option,
};
2044
2045static ssize_t msm_pcie_set_aer_enable(struct file *file,
2046 const char __user *buf,
2047 size_t count, loff_t *ppos)
2048{
2049 unsigned long ret;
2050 char str[MAX_MSG_LEN];
2051 u32 new_aer_enable = 0;
2052 u32 temp_rc_sel;
2053 int i;
2054
2055 memset(str, 0, sizeof(str));
2056 ret = copy_from_user(str, buf, sizeof(str));
2057 if (ret)
2058 return -EFAULT;
2059
2060 for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
2061 new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
2062
2063 if (new_aer_enable > 1) {
2064 pr_err(
2065 "PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
2066 new_aer_enable);
2067 return count;
2068 }
2069
2070 if (rc_sel)
2071 temp_rc_sel = rc_sel;
2072 else
2073 temp_rc_sel = 0x1;
2074
2075 for (i = 0; i < MAX_RC_NUM; i++) {
2076 if (temp_rc_sel & (1 << i)) {
2077 msm_pcie_dev[i].aer_enable = new_aer_enable;
2078 PCIE_DBG_FS(&msm_pcie_dev[i],
2079 "PCIe: RC%d: aer_enable is now %d\n",
2080 i, msm_pcie_dev[i].aer_enable);
2081
2082 msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
2083 PCIE20_BRIDGE_CTRL,
2084 new_aer_enable ? 0 : BIT(16),
2085 new_aer_enable ? BIT(16) : 0);
2086
2087 PCIE_DBG_FS(&msm_pcie_dev[i],
2088 "RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
2089 readl_relaxed(msm_pcie_dev[i].dm_core +
2090 PCIE20_BRIDGE_CTRL));
2091 }
2092 }
2093
2094 return count;
2095}
2096
/* debugfs "aer_enable" entry: write-only, parsed by msm_pcie_set_aer_enable() */
const struct file_operations msm_pcie_aer_enable_ops = {
	.write = msm_pcie_set_aer_enable,
};
2100
2101static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
2102 const char __user *buf,
2103 size_t count, loff_t *ppos)
2104{
2105 unsigned long ret;
2106 char str[MAX_MSG_LEN];
2107 int i;
Tony Truongfdbd5672017-01-06 16:23:14 -08002108 u32 size = sizeof(str) < count ? sizeof(str) : count;
Tony Truong349ee492014-10-01 17:35:56 -07002109
Tony Truongfdbd5672017-01-06 16:23:14 -08002110 memset(str, 0, size);
2111 ret = copy_from_user(str, buf, size);
Tony Truong349ee492014-10-01 17:35:56 -07002112 if (ret)
2113 return -EFAULT;
2114
2115 corr_counter_limit = 0;
Tony Truongfdbd5672017-01-06 16:23:14 -08002116 for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
Tony Truong349ee492014-10-01 17:35:56 -07002117 corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
2118
2119 pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
2120
2121 return count;
2122}
2123
/* debugfs "corr_counter_limit" entry: write-only */
const struct file_operations msm_pcie_corr_counter_limit_ops = {
	.write = msm_pcie_set_corr_counter_limit,
};
2127
2128static void msm_pcie_debugfs_init(void)
2129{
2130 rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
2131 wr_mask = 0xffffffff;
2132
2133 dent_msm_pcie = debugfs_create_dir("pci-msm", 0);
2134 if (IS_ERR(dent_msm_pcie)) {
2135 pr_err("PCIe: fail to create the folder for debug_fs.\n");
2136 return;
2137 }
2138
2139 dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
2140 dent_msm_pcie, 0,
2141 &msm_pcie_rc_sel_ops);
2142 if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
2143 pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
2144 goto rc_sel_error;
2145 }
2146
2147 dfile_case = debugfs_create_file("case", 0664,
2148 dent_msm_pcie, 0,
2149 &msm_pcie_cmd_debug_ops);
2150 if (!dfile_case || IS_ERR(dfile_case)) {
2151 pr_err("PCIe: fail to create the file for debug_fs case.\n");
2152 goto case_error;
2153 }
2154
2155 dfile_base_sel = debugfs_create_file("base_sel", 0664,
2156 dent_msm_pcie, 0,
2157 &msm_pcie_base_sel_ops);
2158 if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
2159 pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
2160 goto base_sel_error;
2161 }
2162
2163 dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
2164 dent_msm_pcie, 0,
2165 &msm_pcie_linkdown_panic_ops);
2166 if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
2167 pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
2168 goto linkdown_panic_error;
2169 }
2170
2171 dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
2172 dent_msm_pcie, 0,
2173 &msm_pcie_wr_offset_ops);
2174 if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
2175 pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
2176 goto wr_offset_error;
2177 }
2178
2179 dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
2180 dent_msm_pcie, 0,
2181 &msm_pcie_wr_mask_ops);
2182 if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
2183 pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
2184 goto wr_mask_error;
2185 }
2186
2187 dfile_wr_value = debugfs_create_file("wr_value", 0664,
2188 dent_msm_pcie, 0,
2189 &msm_pcie_wr_value_ops);
2190 if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
2191 pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
2192 goto wr_value_error;
2193 }
2194
Tony Truong9f2c7722017-02-28 15:02:27 -08002195 dfile_boot_option = debugfs_create_file("boot_option", 0664,
Tony Truong349ee492014-10-01 17:35:56 -07002196 dent_msm_pcie, 0,
Tony Truong9f2c7722017-02-28 15:02:27 -08002197 &msm_pcie_boot_option_ops);
2198 if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
2199 pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
2200 goto boot_option_error;
Tony Truong349ee492014-10-01 17:35:56 -07002201 }
2202
2203 dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
2204 dent_msm_pcie, 0,
2205 &msm_pcie_aer_enable_ops);
2206 if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
2207 pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
2208 goto aer_enable_error;
2209 }
2210
2211 dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
2212 0664, dent_msm_pcie, 0,
2213 &msm_pcie_corr_counter_limit_ops);
2214 if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
2215 pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
2216 goto corr_counter_limit_error;
2217 }
2218 return;
2219
2220corr_counter_limit_error:
2221 debugfs_remove(dfile_aer_enable);
2222aer_enable_error:
Tony Truong9f2c7722017-02-28 15:02:27 -08002223 debugfs_remove(dfile_boot_option);
2224boot_option_error:
Tony Truong349ee492014-10-01 17:35:56 -07002225 debugfs_remove(dfile_wr_value);
2226wr_value_error:
2227 debugfs_remove(dfile_wr_mask);
2228wr_mask_error:
2229 debugfs_remove(dfile_wr_offset);
2230wr_offset_error:
2231 debugfs_remove(dfile_linkdown_panic);
2232linkdown_panic_error:
2233 debugfs_remove(dfile_base_sel);
2234base_sel_error:
2235 debugfs_remove(dfile_case);
2236case_error:
2237 debugfs_remove(dfile_rc_sel);
2238rc_sel_error:
2239 debugfs_remove(dent_msm_pcie);
2240}
2241
2242static void msm_pcie_debugfs_exit(void)
2243{
2244 debugfs_remove(dfile_rc_sel);
2245 debugfs_remove(dfile_case);
2246 debugfs_remove(dfile_base_sel);
2247 debugfs_remove(dfile_linkdown_panic);
2248 debugfs_remove(dfile_wr_offset);
2249 debugfs_remove(dfile_wr_mask);
2250 debugfs_remove(dfile_wr_value);
Tony Truong9f2c7722017-02-28 15:02:27 -08002251 debugfs_remove(dfile_boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07002252 debugfs_remove(dfile_aer_enable);
2253 debugfs_remove(dfile_corr_counter_limit);
2254}
#else
/* CONFIG_DEBUG_FS disabled: debugfs setup/teardown become no-ops */
static void msm_pcie_debugfs_init(void)
{
}

static void msm_pcie_debugfs_exit(void)
{
}
#endif
2264
2265static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
2266{
2267 return readl_relaxed(dev->dm_core +
2268 PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
2269}
2270
/**
 * msm_pcie_iatu_config - configure outbound address translation region
 * @dev: root complex
 * @nr: region number
 * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
 * @host_addr: - region start address on host
 * @host_end: - region end address (low 32 bit) on host,
 *	upper 32 bits are same as for @host_addr
 * @target_addr: - region start address on target
 *
 * Programs one iATU viewport via the DBI registers.  The write/wmb()
 * ordering below is deliberate: select the region, disable it, program
 * the window, then re-enable it (CTRL2 bit 31), with barriers so the
 * hardware sees each step before the next.
 */
static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
				unsigned long host_addr, u32 host_end,
				unsigned long target_addr)
{
	void __iomem *pcie20 = dev->dm_core;

	/* mirror the programmed values so they can be restored after resume */
	if (dev->shadow_en) {
		dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
			nr;
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
			type;
		dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
			lower_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
			upper_32_bits(host_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
			host_end;
		dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
			lower_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
			upper_32_bits(target_addr);
		dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
			BIT(31);
	}

	/* select region */
	writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
	/* ensure that hardware locks it */
	wmb();

	/* switch off region before changing it */
	writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
	/* and wait till it propagates to the hardware */
	wmb();

	writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
	writel_relaxed(lower_32_bits(host_addr),
		       pcie20 + PCIE20_PLR_IATU_LBAR);
	writel_relaxed(upper_32_bits(host_addr),
		       pcie20 + PCIE20_PLR_IATU_UBAR);
	writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
	writel_relaxed(lower_32_bits(target_addr),
		       pcie20 + PCIE20_PLR_IATU_LTAR);
	writel_relaxed(upper_32_bits(target_addr),
		       pcie20 + PCIE20_PLR_IATU_UTAR);
	/* ensure that changes propagated to the hardware */
	wmb();
	/* re-enable the region (CTRL2 bit 31) */
	writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);

	/* ensure that changes propagated to the hardware */
	wmb();

	/* after enumeration, dump the programmed window for debugging */
	if (dev->enumerated) {
		PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
			dev->pcidev_table[nr].bdf >> 24,
			dev->pcidev_table[nr].bdf >> 19 & 0x1f,
			dev->pcidev_table[nr].bdf >> 16 & 0x07);
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
	}
}
2356
2357/**
2358 * msm_pcie_cfg_bdf - configure for config access
2359 * @dev: root commpex
2360 * @bus: PCI bus number
2361 * @devfn: PCI dev and function number
2362 *
2363 * Remap if required region 0 for config access of proper type
2364 * (CFG0 for bus 1, CFG1 for other buses)
2365 * Cache current device bdf for speed-up
2366 */
2367static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
2368{
2369 struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
2370 u32 bdf = BDF_OFFSET(bus, devfn);
2371 u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
2372
2373 if (dev->current_bdf == bdf)
2374 return;
2375
2376 msm_pcie_iatu_config(dev, 0, type,
2377 axi_conf->start,
2378 axi_conf->start + SZ_4K - 1,
2379 bdf);
2380
2381 dev->current_bdf = bdf;
2382}
2383
/*
 * msm_pcie_save_shadow - record a config-space write in the shadow copy
 * @dev: root complex
 * @word_offset: word-aligned config-space offset that was written
 * @wr_val: value written
 * @bdf: target bus/device/function in BDF_OFFSET() format
 * @rc: true when the write targeted the root complex itself
 *
 * RC writes go straight into rc_shadow.  Endpoint writes are stored per
 * device: the first write from an unknown BDF claims the first free slot
 * in the global msm_pcie_dev_tbl and in this RC's pcidev_table before
 * the value is saved.
 */
static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
					u32 word_offset, u32 wr_val,
					u32 bdf, bool rc)
{
	int i, j;
	u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;

	if (rc) {
		dev->rc_shadow[word_offset / 4] = wr_val;
	} else {
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			/* free local slot: register this BDF in both tables */
			if (!dev->pcidev_table[i].bdf) {
				for (j = 0; j < max_dev; j++)
					if (!msm_pcie_dev_tbl[j].bdf) {
						msm_pcie_dev_tbl[j].bdf = bdf;
						break;
					}
				dev->pcidev_table[i].bdf = bdf;
				/* a second device implies a bridge below the RC */
				if ((!dev->bridge_found) && (i > 0))
					dev->bridge_found = true;
			}
			if (dev->pcidev_table[i].bdf == bdf) {
				dev->ep_shadow[i][word_offset / 4] = wr_val;
				break;
			}
		}
	}
}
2412
/*
 * msm_pcie_oper_conf - perform a PCI config-space read or write
 * @bus: PCI bus
 * @devfn: device/function number
 * @oper: RD or WR
 * @where: config-space byte offset
 * @size: access width in bytes (1, 2 or 4)
 * @val: in/out value pointer
 *
 * Common backend for msm_pcie_rd_conf()/msm_pcie_wr_conf().  Validates
 * that config access is allowed (cfg_access set, link up, valid devfn
 * for the RC), resolves the mapped config base for the target BDF, and
 * performs a read-modify-write for sub-word accesses.  Returns 0 or
 * PCIBIOS_DEVICE_NOT_FOUND; on any failure *val is set to all-ones.
 * All checks and the hardware access run under cfg_lock.
 */
static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
				     int where, int size, u32 *val)
{
	uint32_t word_offset, byte_offset, mask;
	uint32_t rd_val, wr_val;
	struct msm_pcie_dev_t *dev;
	void __iomem *config_base;
	bool rc = false;
	u32 rc_idx;
	int rv = 0;
	u32 bdf = BDF_OFFSET(bus->number, devfn);
	int i;

	dev = PCIE_BUS_PRIV_DATA(bus);

	if (!dev) {
		pr_err("PCIe: No device found for this bus.\n");
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto out;
	}

	rc_idx = dev->rc_idx;
	rc = (bus->number == 0);

	spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);

	/* config access may be blocked, e.g. during suspend */
	if (!dev->cfg_access) {
		PCIE_DBG3(dev,
			"Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* the root complex itself only exists at devfn 0 */
	if (rc && (devfn != 0)) {
		PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
			 (oper == RD) ? "rd" : "wr", bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG3(dev,
			"Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* check if the link is up for endpoint */
	if (!rc && !msm_pcie_is_link_up(dev)) {
		PCIE_ERR(dev,
			"PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
			rc_idx, (oper == RD) ? "rd" : "wr",
			bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* before enumeration, point iATU region 0 at the target device */
	if (!rc && !dev->enumerated)
		msm_pcie_cfg_bdf(dev, bus->number, devfn);

	word_offset = where & ~0x3;
	byte_offset = where & 0x3;
	/* mask covering the 'size' bytes at 'byte_offset' within the word */
	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);

	if (rc || !dev->enumerated) {
		config_base = rc ? dev->dm_core : dev->conf;
	} else {
		/* after enumeration each BDF has its own mapped window */
		for (i = 0; i < MAX_DEVICE_NUM; i++) {
			if (dev->pcidev_table[i].bdf == bdf) {
				config_base = dev->pcidev_table[i].conf_base;
				break;
			}
		}
		if (i == MAX_DEVICE_NUM) {
			*val = ~0;
			rv = PCIBIOS_DEVICE_NOT_FOUND;
			goto unlock;
		}
	}

	rd_val = readl_relaxed(config_base + word_offset);

	if (oper == RD) {
		*val = ((rd_val & mask) >> (8 * byte_offset));
		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
			rc_idx, bus->number, devfn, where, size, *val, rd_val);
	} else {
		/* read-modify-write so neighbouring bytes are preserved */
		wr_val = (rd_val & ~mask) |
			((*val << (8 * byte_offset)) & mask);

		/* RC interrupt line register: force bits 17:16 */
		if ((bus->number == 0) && (where == 0x3c))
			wr_val = wr_val | (3 << 16);

		writel_relaxed(wr_val, config_base + word_offset);
		wmb(); /* ensure config data is written to hardware register */

		/*
		 * Save the write in the shadow copy, unless the device reads
		 * back as all-ones (link likely went down mid-access).
		 */
		if (dev->shadow_en) {
			if (rd_val == PCIE_LINK_DOWN &&
				(readl_relaxed(config_base) == PCIE_LINK_DOWN))
				PCIE_ERR(dev,
					"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
					rc_idx, bus->number, devfn,
					where, size);
			else
				msm_pcie_save_shadow(dev, word_offset, wr_val,
					bdf, rc);
		}

		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
			rc_idx, bus->number, devfn, where, size,
			wr_val, rd_val, *val);
	}

unlock:
	spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
out:
	return rv;
}
2540
2541static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
2542 int size, u32 *val)
2543{
2544 int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
2545
2546 if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
2547 *val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
2548 PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
2549 }
2550
2551 return ret;
2552}
2553
2554static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2555 int where, int size, u32 val)
2556{
2557 return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
2558}
2559
/* config accessors handed to the PCI core for buses under this RC */
static struct pci_ops msm_pcie_ops = {
	.read = msm_pcie_rd_conf,
	.write = msm_pcie_wr_conf,
};
2564
2565static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
2566{
2567 int rc = 0, i;
2568 struct msm_pcie_gpio_info_t *info;
2569
2570 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
2571
2572 for (i = 0; i < dev->gpio_n; i++) {
2573 info = &dev->gpio[i];
2574
2575 if (!info->num)
2576 continue;
2577
2578 rc = gpio_request(info->num, info->name);
2579 if (rc) {
2580 PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
2581 dev->rc_idx, info->name, rc);
2582 break;
2583 }
2584
2585 if (info->out)
2586 rc = gpio_direction_output(info->num, info->init);
2587 else
2588 rc = gpio_direction_input(info->num);
2589 if (rc) {
2590 PCIE_ERR(dev,
2591 "PCIe: RC%d can't set direction for GPIO %s:%d\n",
2592 dev->rc_idx, info->name, rc);
2593 gpio_free(info->num);
2594 break;
2595 }
2596 }
2597
2598 if (rc)
2599 while (i--)
2600 gpio_free(dev->gpio[i].num);
2601
2602 return rc;
2603}
2604
2605static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
2606{
2607 int i;
2608
2609 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
2610
2611 for (i = 0; i < dev->gpio_n; i++)
2612 gpio_free(dev->gpio[i].num);
2613}
2614
/*
 * msm_pcie_vreg_init - enable all regulators for this RC in table order.
 * @dev: root complex
 *
 * For each populated vreg entry: set the voltage range (when max_v is
 * given), set the load (when opt_mode is given), then enable.  On any
 * failure, previously enabled regulators are disabled in reverse order;
 * for "vreg-cx" the voltage vote is also dropped to the RPMh retention
 * floor.  Returns 0 on success or the first error code.
 */
int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct regulator *vreg;
	struct msm_pcie_vreg_info_t *info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		info = &dev->vreg[i];
		vreg = info->hdl;

		/* optional supply not present for this target */
		if (!vreg)
			continue;

		PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
			dev->rc_idx, info->name);
		if (info->max_v) {
			rc = regulator_set_voltage(vreg,
						info->min_v, info->max_v);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set voltage for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		if (info->opt_mode) {
			rc = regulator_set_load(vreg, info->opt_mode);
			if (rc < 0) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set mode for %s: %d\n",
					dev->rc_idx, info->name, rc);
				break;
			}
		}

		rc = regulator_enable(vreg);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe: RC%d can't enable regulator %s: %d\n",
				dev->rc_idx, info->name, rc);
			break;
		}
	}

	/* unwind: i is the failed entry, so start at i - 1 */
	if (rc)
		while (i--) {
			struct regulator *hdl = dev->vreg[i].hdl;

			if (hdl) {
				regulator_disable(hdl);
				if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
					PCIE_DBG(dev,
						"RC%d: Removing %s vote.\n",
						dev->rc_idx,
						dev->vreg[i].name);
					regulator_set_voltage(hdl,
						RPMH_REGULATOR_LEVEL_OFF,
						RPMH_REGULATOR_LEVEL_MAX);
				}
			}

		}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
2685
2686static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
2687{
2688 int i;
2689
2690 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
2691
2692 for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
2693 if (dev->vreg[i].hdl) {
2694 PCIE_DBG(dev, "Vreg %s is being disabled\n",
2695 dev->vreg[i].name);
2696 regulator_disable(dev->vreg[i].hdl);
2697
2698 if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
2699 PCIE_DBG(dev,
2700 "RC%d: Removing %s vote.\n",
2701 dev->rc_idx,
2702 dev->vreg[i].name);
2703 regulator_set_voltage(dev->vreg[i].hdl,
Tony Truongb213ac12017-04-05 15:21:20 -07002704 RPMH_REGULATOR_LEVEL_OFF,
2705 RPMH_REGULATOR_LEVEL_MAX);
Tony Truong349ee492014-10-01 17:35:56 -07002706 }
2707 }
2708 }
2709
2710 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
2711}
2712
2713static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
2714{
2715 int i, rc = 0;
2716 struct msm_pcie_clk_info_t *info;
2717 struct msm_pcie_reset_info_t *reset_info;
2718
2719 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
2720
2721 rc = regulator_enable(dev->gdsc);
2722
2723 if (rc) {
2724 PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
2725 dev->rc_idx, dev->pdev->name);
2726 return rc;
2727 }
2728
2729 if (dev->gdsc_smmu) {
2730 rc = regulator_enable(dev->gdsc_smmu);
2731
2732 if (rc) {
2733 PCIE_ERR(dev,
2734 "PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
2735 dev->rc_idx, dev->pdev->name);
2736 return rc;
2737 }
2738 }
2739
2740 PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
2741 if (dev->bus_client) {
2742 rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
2743 if (rc) {
2744 PCIE_ERR(dev,
2745 "PCIe: fail to set bus bandwidth for RC%d:%d.\n",
2746 dev->rc_idx, rc);
2747 return rc;
2748 }
2749
2750 PCIE_DBG2(dev,
2751 "PCIe: set bus bandwidth for RC%d.\n",
2752 dev->rc_idx);
2753 }
2754
2755 for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
2756 info = &dev->clk[i];
2757
2758 if (!info->hdl)
2759 continue;
2760
2761 if (info->config_mem)
2762 msm_pcie_config_clock_mem(dev, info);
2763
2764 if (info->freq) {
2765 rc = clk_set_rate(info->hdl, info->freq);
2766 if (rc) {
2767 PCIE_ERR(dev,
2768 "PCIe: RC%d can't set rate for clk %s: %d.\n",
2769 dev->rc_idx, info->name, rc);
2770 break;
2771 }
2772
2773 PCIE_DBG2(dev,
2774 "PCIe: RC%d set rate for clk %s.\n",
2775 dev->rc_idx, info->name);
2776 }
2777
2778 rc = clk_prepare_enable(info->hdl);
2779
2780 if (rc)
2781 PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
2782 dev->rc_idx, info->name);
2783 else
2784 PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
2785 info->name, dev->rc_idx);
2786 }
2787
2788 if (rc) {
2789 PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
2790 dev->rc_idx);
2791 while (i--) {
2792 struct clk *hdl = dev->clk[i].hdl;
2793
2794 if (hdl)
2795 clk_disable_unprepare(hdl);
2796 }
2797
2798 if (dev->gdsc_smmu)
2799 regulator_disable(dev->gdsc_smmu);
2800
2801 regulator_disable(dev->gdsc);
2802 }
2803
2804 for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
2805 reset_info = &dev->reset[i];
2806 if (reset_info->hdl) {
Tony Truongc21087d2017-05-01 17:58:06 -07002807 rc = reset_control_assert(reset_info->hdl);
2808 if (rc)
2809 PCIE_ERR(dev,
2810 "PCIe: RC%d failed to assert reset for %s.\n",
2811 dev->rc_idx, reset_info->name);
2812 else
2813 PCIE_DBG2(dev,
2814 "PCIe: RC%d successfully asserted reset for %s.\n",
2815 dev->rc_idx, reset_info->name);
2816
2817 /* add a 1ms delay to ensure the reset is asserted */
2818 usleep_range(1000, 1005);
2819
Tony Truong349ee492014-10-01 17:35:56 -07002820 rc = reset_control_deassert(reset_info->hdl);
2821 if (rc)
2822 PCIE_ERR(dev,
2823 "PCIe: RC%d failed to deassert reset for %s.\n",
2824 dev->rc_idx, reset_info->name);
2825 else
2826 PCIE_DBG2(dev,
2827 "PCIe: RC%d successfully deasserted reset for %s.\n",
2828 dev->rc_idx, reset_info->name);
2829 }
2830 }
2831
2832 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
2833
2834 return rc;
2835}
2836
2837static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
2838{
2839 int i;
2840 int rc;
2841
2842 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
2843
2844 for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
2845 if (dev->clk[i].hdl)
2846 clk_disable_unprepare(dev->clk[i].hdl);
2847
2848 if (dev->bus_client) {
2849 PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
2850 dev->rc_idx);
2851
2852 rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
2853 if (rc)
2854 PCIE_ERR(dev,
2855 "PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
2856 dev->rc_idx, rc);
2857 else
2858 PCIE_DBG(dev,
2859 "PCIe: relinquish bus bandwidth for RC%d.\n",
2860 dev->rc_idx);
2861 }
2862
2863 if (dev->gdsc_smmu)
2864 regulator_disable(dev->gdsc_smmu);
2865
2866 regulator_disable(dev->gdsc);
2867
2868 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
2869}
2870
/*
 * msm_pcie_pipe_clk_init - set rates and enable the PHY pipe clocks.
 *
 * On a clock failure the already-enabled pipe clocks are rolled back,
 * but every pipe reset is still pulsed (assert, hold 1ms, deassert)
 * before the error is returned.
 */
static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct msm_pcie_clk_info_t *info;
	struct msm_pcie_reset_info_t *pipe_reset_info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		info = &dev->pipeclk[i];

		/* optional clock not present for this target */
		if (!info->hdl)
			continue;


		if (info->config_mem)
			msm_pcie_config_clock_mem(dev, info);

		if (info->freq) {
			rc = clk_set_rate(info->hdl, info->freq);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set rate for clk %s: %d.\n",
					dev->rc_idx, info->name, rc);
				break;
			}

			PCIE_DBG2(dev,
				"PCIe: RC%d set rate for clk %s: %d.\n",
				dev->rc_idx, info->name, rc);
		}

		rc = clk_prepare_enable(info->hdl);

		if (rc)
			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
				dev->rc_idx, info->name);
		else
			PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
				dev->rc_idx, info->name);
	}

	/* unwind: i is the failed clock, so start at i - 1 */
	if (rc) {
		PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
			dev->rc_idx);
		while (i--)
			if (dev->pipeclk[i].hdl)
				clk_disable_unprepare(dev->pipeclk[i].hdl);
	}

	/* pulse every pipe reset: assert, hold 1ms, deassert */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];
		if (pipe_reset_info->hdl) {
			rc = reset_control_assert(pipe_reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to assert pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully asserted pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);

			/* add a 1ms delay to ensure the reset is asserted */
			usleep_range(1000, 1005);

			rc = reset_control_deassert(
					pipe_reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to deassert pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully deasserted pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
2954
2955static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
2956{
2957 int i;
2958
2959 PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
2960
2961 for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
2962 if (dev->pipeclk[i].hdl)
2963 clk_disable_unprepare(
2964 dev->pipeclk[i].hdl);
2965
2966 PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
2967}
2968
2969static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
2970{
2971 int i;
2972 u8 type;
2973 struct msm_pcie_device_info *dev_table = dev->pcidev_table;
2974
2975 for (i = 0; i < MAX_DEVICE_NUM; i++) {
2976 if (!dev_table[i].bdf)
2977 break;
2978
2979 type = dev_table[i].bdf >> 24 == 0x1 ?
2980 PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
2981
2982 msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
2983 dev_table[i].phy_address + SZ_4K - 1,
2984 dev_table[i].bdf);
2985 }
2986}
2987
/*
 * msm_pcie_config_controller - program the controller after link-up.
 *
 * Sets up iATU config-space translation, N_FTS, the AUX clock frequency,
 * the completion timeout, AER enables on the RC, and (when an SMMU is
 * present) the PARF BDF-to-SID translation registers.
 */
static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
{
	int i;

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
	 * axi config address range to device config address range
	 */
	if (dev->enumerated) {
		msm_pcie_iatu_config_all_ep(dev);
	} else {
		dev->current_bdf = 0; /* to force IATU re-config */
		msm_pcie_cfg_bdf(dev, 1, 0);
	}

	/* configure N_FTS */
	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
	if (!dev->n_fts)
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
			0, BIT(15));
	else
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
			PCIE20_ACK_N_FTS,
			dev->n_fts << 8);

	/* mirror the result so it can be restored after resume */
	if (dev->shadow_en)
		dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
			readl_relaxed(dev->dm_core +
			PCIE20_ACK_F_ASPM_CTRL_REG);

	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));

	/* configure AUX clock frequency register for PCIe core */
	if (dev->use_19p2mhz_aux_clk)
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
	else
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);

	/* configure the completion timeout value for PCIe core */
	if (dev->cpl_timeout && dev->bridge_found)
		msm_pcie_write_reg_field(dev->dm_core,
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0xf, dev->cpl_timeout);

	/* Enable AER on RC */
	if (dev->aer_enable) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
				BIT(16)|BIT(17));
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(3)|BIT(2)|BIT(1)|BIT(0));

		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
	}

	/* configure SMMU registers */
	if (dev->smmu_exist) {
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_SID_OFFSET, 0);

		/* re-install per-device BDF translations after enumeration */
		if (dev->enumerated) {
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				if (dev->pcidev_table[i].dev &&
					dev->pcidev_table[i].short_bdf) {
					msm_pcie_write_reg(dev->parf,
						PCIE20_PARF_BDF_TRANSLATE_N +
						dev->pcidev_table[i].short_bdf
						* 4,
						dev->pcidev_table[i].bdf >> 16);
				}
			}
		}
	}
}
3069
/*
 * msm_pcie_config_link_state() - configure link power management on both
 * the root complex and the endpoint according to the DT-driven support
 * flags in @dev: common clock configuration, clock power management,
 * ASPM L0s/L1, and (optionally) the L1 substates (L1SS).
 *
 * The endpoint's standard capability list is walked to find the PCI
 * Express capability; if L1SS is enabled, the extended capability list
 * is also walked for the L1 Substates capability.  If either capability
 * is missing the function returns early with only a debug log.
 *
 * When shadow_en is set, each register just written is read back into
 * the RC/EP shadow arrays (presumably restored after link recovery
 * elsewhere in the driver — the restore path is not visible here).
 */
static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
{
	u32 val;
	u32 current_offset;
	u32 ep_l1sub_ctrl1_offset = 0;
	u32 ep_l1sub_cap_reg1_offset = 0;
	u32 ep_link_cap_offset = 0;
	u32 ep_link_ctrlstts_offset = 0;
	u32 ep_dev_ctrl2stts2_offset = 0;

	/* Enable the AUX Clock and the Core Clk to be synchronous for L1SS*/
	if (!dev->aux_clk_sync && dev->l1ss_supported)
		msm_pcie_write_mask(dev->parf +
				PCIE20_PARF_SYS_CTRL, BIT(3), 0);

	/* first byte of the capability pointer register is the list head */
	current_offset = readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;

	/* walk the EP's standard capability list for the PCIe capability */
	while (current_offset) {
		if (msm_pcie_check_align(dev, current_offset))
			return;

		val = readl_relaxed(dev->conf + current_offset);
		if ((val & 0xff) == PCIE20_CAP_ID) {
			/* fixed offsets within the PCIe capability block */
			ep_link_cap_offset = current_offset + 0x0c;
			ep_link_ctrlstts_offset = current_offset + 0x10;
			ep_dev_ctrl2stts2_offset = current_offset + 0x28;
			break;
		}
		/* byte 1 of the capability header is the next pointer */
		current_offset = (val >> 8) & 0xff;
	}

	if (!ep_link_cap_offset) {
		PCIE_DBG(dev,
			"RC%d endpoint does not support PCIe capability registers\n",
			dev->rc_idx);
		return;
	}

	PCIE_DBG(dev,
		"RC%d: ep_link_cap_offset: 0x%x\n",
		dev->rc_idx, ep_link_cap_offset);

	/* Common Clock Configuration: set BIT(6) of Link Control on RC and EP */
	if (dev->common_clk_en) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(6));

		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(6));

		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);

			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}

		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/*
	 * Clock power management: only enable (BIT(8) of Link Control)
	 * when the EP advertises the capability (BIT(18) of Link Cap).
	 */
	if (dev->clk_power_manage_en) {
		val = readl_relaxed(dev->conf + ep_link_cap_offset);
		if (val & BIT(18)) {
			msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
						0, BIT(8));

			if (dev->shadow_en)
				dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
					readl_relaxed(dev->conf +
						ep_link_ctrlstts_offset);

			PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset));
		}
	}

	/* ASPM L0s entry: BIT(0) of Link Control on both ends */
	if (dev->l0s_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(0));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(0));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	/* ASPM L1 entry: BIT(1) of Link Control on both ends */
	if (dev->l1_supported) {
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
					0, BIT(1));
		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
					0, BIT(1));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS);
			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
				readl_relaxed(dev->conf +
					ep_link_ctrlstts_offset);
		}
		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_CAP_LINKCTRLSTATUS));
		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
	}

	if (dev->l1ss_supported) {
		/* walk the EP's extended capability list for L1SS */
		current_offset = PCIE_EXT_CAP_OFFSET;
		while (current_offset) {
			if (msm_pcie_check_align(dev, current_offset))
				return;

			val = readl_relaxed(dev->conf + current_offset);
			if ((val & 0xffff) == L1SUB_CAP_ID) {
				ep_l1sub_cap_reg1_offset = current_offset + 0x4;
				ep_l1sub_ctrl1_offset = current_offset + 0x8;
				break;
			}
			/* bits [31:20] of the ext-cap header: next pointer */
			current_offset = val >> 20;
		}
		if (!ep_l1sub_ctrl1_offset) {
			PCIE_DBG(dev,
				"RC%d endpoint does not support l1ss registers\n",
				dev->rc_idx);
			return;
		}

		val = readl_relaxed(dev->conf + ep_l1sub_cap_reg1_offset);

		PCIE_DBG2(dev, "EP's L1SUB_CAPABILITY_REG_1: 0x%x\n", val);
		PCIE_DBG2(dev, "RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
			dev->rc_idx, ep_l1sub_ctrl1_offset);

		/* keep only the four substate-support bits the EP advertises */
		val &= 0xf;

		/* enable the EP-supported substates on the RC ... */
		msm_pcie_write_reg_field(dev->dm_core, PCIE20_L1SUB_CONTROL1,
					0xf, val);
		/* ... BIT(10) of Device Control 2 is LTR enable per PCIe spec */
		msm_pcie_write_mask(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2,
					0, BIT(10));
		/* mirror the same configuration on the EP */
		msm_pcie_write_reg_field(dev->conf, ep_l1sub_ctrl1_offset,
					0xf, val);
		msm_pcie_write_mask(dev->conf + ep_dev_ctrl2stts2_offset,
					0, BIT(10));
		if (dev->shadow_en) {
			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_L1SUB_CONTROL1);
			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
				readl_relaxed(dev->dm_core +
					PCIE20_DEVICE_CONTROL2_STATUS2);
			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
				readl_relaxed(dev->conf +
					ep_l1sub_ctrl1_offset);
			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
				readl_relaxed(dev->conf +
					ep_dev_ctrl2stts2_offset);
		}
		PCIE_DBG2(dev, "RC's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1));
		PCIE_DBG2(dev, "RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->dm_core +
				PCIE20_DEVICE_CONTROL2_STATUS2));
		PCIE_DBG2(dev, "EP's L1SUB_CONTROL1:0x%x\n",
			readl_relaxed(dev->conf + ep_l1sub_ctrl1_offset));
		PCIE_DBG2(dev, "EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
			readl_relaxed(dev->conf +
				ep_dev_ctrl2stts2_offset));
	}
}
3257
3258void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
3259{
3260 int i;
3261
3262 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
3263
3264 /* program MSI controller and enable all interrupts */
3265 writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
3266 writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
3267
3268 for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
3269 writel_relaxed(~0, dev->dm_core +
3270 PCIE20_MSI_CTRL_INTR_EN + (i * 12));
3271
3272 /* ensure that hardware is configured before proceeding */
3273 wmb();
3274}
3275
/*
 * msm_pcie_get_resources() - acquire all platform resources for this RC
 * from the device tree: regulators, GDSCs, GPIOs, PHY init sequences,
 * clocks (core and pipe), resets, bus-scaling votes, MMIO regions and
 * IRQ/MSI numbers.
 *
 * Optional resources that are absent are logged and skipped; a missing
 * *required* resource (or -EPROBE_DEFER from the regulator framework)
 * aborts with an error so the probe can be deferred or failed.  On
 * success the convenience aliases (dev->parf, dev->phy, dev->dm_core,
 * dev->conf, ...) point at the ioremapped regions.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
					struct platform_device *pdev)
{
	int i, len, cnt, ret = 0, size = 0;
	struct msm_pcie_vreg_info_t *vreg_info;
	struct msm_pcie_gpio_info_t *gpio_info;
	struct msm_pcie_clk_info_t *clk_info;
	struct resource *res;
	struct msm_pcie_res_info_t *res_info;
	struct msm_pcie_irq_info_t *irq_info;
	struct msm_pcie_irq_info_t *msi_info;
	struct msm_pcie_reset_info_t *reset_info;
	struct msm_pcie_reset_info_t *pipe_reset_info;
	char prop_name[MAX_PROP_SIZE];
	const __be32 *prop;
	u32 *clkfreq = NULL;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	/*
	 * Read the per-clock max frequencies; indexed later by the pipe
	 * clocks first, then the core clocks.
	 * NOTE(review): clkfreq holds MAX_CLK + MAX_PIPE_CLK entries but
	 * the read below copies "cnt" entries from DT — assumes the DT
	 * never lists more clock names than that sum; TODO confirm.
	 */
	cnt = of_property_count_strings((&pdev->dev)->of_node,
			"clock-names");
	if (cnt > 0) {
		clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
					sizeof(*clkfreq), GFP_KERNEL);
		if (!clkfreq) {
			PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
					dev->rc_idx);
			return -ENOMEM;
		}
		ret = of_property_read_u32_array(
			(&pdev->dev)->of_node,
			"max-clock-frequency-hz", clkfreq, cnt);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
				dev->rc_idx, ret);
			goto out;
		}
	}

	/* regulators: defer on EPROBE_DEFER, fail on missing required vregs */
	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		vreg_info = &dev->vreg[i];
		vreg_info->hdl =
				devm_regulator_get(&pdev->dev, vreg_info->name);

		if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
			PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
				vreg_info->name);
			ret = PTR_ERR(vreg_info->hdl);
			goto out;
		}

		if (IS_ERR(vreg_info->hdl)) {
			if (vreg_info->required) {
				PCIE_DBG(dev, "Vreg %s doesn't exist\n",
					vreg_info->name);
				ret = PTR_ERR(vreg_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Optional Vreg %s doesn't exist\n",
					vreg_info->name);
				vreg_info->hdl = NULL;
			}
		} else {
			dev->vreg_n++;
			/* optional "qcom,<name>-voltage-level" = <max min mode> */
			snprintf(prop_name, MAX_PROP_SIZE,
				"qcom,%s-voltage-level", vreg_info->name);
			prop = of_get_property((&pdev->dev)->of_node,
						prop_name, &len);
			if (!prop || (len != (3 * sizeof(__be32)))) {
				PCIE_DBG(dev, "%s %s property\n",
					prop ? "invalid format" :
					"no", prop_name);
			} else {
				vreg_info->max_v = be32_to_cpup(&prop[0]);
				vreg_info->min_v = be32_to_cpup(&prop[1]);
				vreg_info->opt_mode =
					be32_to_cpup(&prop[2]);
			}
		}
	}

	/* core GDSC power domain is mandatory */
	dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");

	if (IS_ERR(dev->gdsc)) {
		PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
			dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
		if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
			PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
					dev->pdev->name);
		ret = PTR_ERR(dev->gdsc);
		goto out;
	}

	/* SMMU GDSC is optional */
	dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");

	if (IS_ERR(dev->gdsc_smmu)) {
		PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
			dev->rc_idx);
		dev->gdsc_smmu = NULL;
	}

	/*
	 * GPIOs: a missing required GPIO aborts with the negative value
	 * of_get_named_gpio() returned (ret is reset to 0 otherwise).
	 */
	dev->gpio_n = 0;
	for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
		gpio_info = &dev->gpio[i];
		ret = of_get_named_gpio((&pdev->dev)->of_node,
					gpio_info->name, 0);
		if (ret >= 0) {
			gpio_info->num = ret;
			dev->gpio_n++;
			PCIE_DBG(dev, "GPIO num for %s is %d\n",
				gpio_info->name, gpio_info->num);
		} else {
			if (gpio_info->required) {
				PCIE_ERR(dev,
					"Could not get required GPIO %s\n",
					gpio_info->name);
				goto out;
			} else {
				PCIE_DBG(dev,
					"Could not get optional GPIO %s\n",
					gpio_info->name);
			}
		}
		ret = 0;
	}

	/* optional table of (offset, value) pairs for PHY bring-up */
	of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
	if (size) {
		dev->phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->phy_sequence) {
			dev->phy_len =
				size / sizeof(*dev->phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,phy-sequence",
				(unsigned int *)dev->phy_sequence,
				size / sizeof(dev->phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* optional per-port PHY sequence, same layout as above */
	of_get_property(pdev->dev.of_node, "qcom,port-phy-sequence", &size);
	if (size) {
		dev->port_phy_sequence = (struct msm_pcie_phy_info_t *)
			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);

		if (dev->port_phy_sequence) {
			dev->port_phy_len =
				size / sizeof(*dev->port_phy_sequence);

			of_property_read_u32_array(pdev->dev.of_node,
				"qcom,port-phy-sequence",
				(unsigned int *)dev->port_phy_sequence,
				size / sizeof(dev->port_phy_sequence->offset));
		} else {
			PCIE_ERR(dev,
				"RC%d: Could not allocate memory for port phy init sequence.\n",
				dev->rc_idx);
			ret = -ENOMEM;
			goto out;
		}
	} else {
		PCIE_DBG(dev, "RC%d: port phy sequence is not present in DT\n",
			dev->rc_idx);
	}

	/* core clocks; frequencies follow the pipe-clock entries in clkfreq */
	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
		clk_info = &dev->clk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i +
					MSM_PCIE_MAX_PIPE_CLK];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* pipe clocks; their frequencies lead the clkfreq array */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
		clk_info = &dev->pipeclk[i];

		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);

		if (IS_ERR(clk_info->hdl)) {
			if (clk_info->required) {
				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
				clk_info->name, PTR_ERR(clk_info->hdl));
				ret = PTR_ERR(clk_info->hdl);
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Clock %s\n",
					clk_info->name);
				clk_info->hdl = NULL;
			}
		} else {
			if (clkfreq != NULL) {
				clk_info->freq = clkfreq[i];
				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
					clk_info->name, clk_info->freq);
			}
		}
	}

	/* reset controllers (hdl nulled before goto so deinit paths skip it) */
	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
		reset_info = &dev->reset[i];

		reset_info->hdl = devm_reset_control_get(&pdev->dev,
						reset_info->name);

		if (IS_ERR(reset_info->hdl)) {
			if (reset_info->required) {
				PCIE_DBG(dev,
					"Reset %s isn't available:%ld\n",
					reset_info->name,
					PTR_ERR(reset_info->hdl));

				ret = PTR_ERR(reset_info->hdl);
				reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Reset %s\n",
					reset_info->name);
				reset_info->hdl = NULL;
			}
		}
	}

	/* pipe-clock reset controllers, same policy as above */
	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];

		pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
						pipe_reset_info->name);

		if (IS_ERR(pipe_reset_info->hdl)) {
			if (pipe_reset_info->required) {
				PCIE_DBG(dev,
					"Pipe Reset %s isn't available:%ld\n",
					pipe_reset_info->name,
					PTR_ERR(pipe_reset_info->hdl));

				ret = PTR_ERR(pipe_reset_info->hdl);
				pipe_reset_info->hdl = NULL;
				goto out;
			} else {
				PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
					pipe_reset_info->name);
				pipe_reset_info->hdl = NULL;
			}
		}
	}

	/* bus-scaling (bandwidth voting) client; optional */
	dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (!dev->bus_scale_table) {
		PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
			dev->rc_idx, dev->pdev->name);
		dev->bus_client = 0;
	} else {
		dev->bus_client =
			msm_bus_scale_register_client(dev->bus_scale_table);
		if (!dev->bus_client) {
			PCIE_ERR(dev,
				"PCIe: Failed to register bus client for RC%d (%s)\n",
				dev->rc_idx, dev->pdev->name);
			msm_bus_cl_clear_pdata(dev->bus_scale_table);
			ret = -ENODEV;
			goto out;
		}
	}

	/* map every named MMIO region; a missing region is only logged */
	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
		res_info = &dev->res[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							res_info->name);

		if (!res) {
			PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
				dev->rc_idx, res_info->name);
		} else {
			PCIE_DBG(dev, "start addr for %s is %pa.\n",
				res_info->name,	&res->start);

			res_info->base = devm_ioremap(&pdev->dev,
						res->start, resource_size(res));
			if (!res_info->base) {
				PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
					dev->rc_idx, res_info->name);
				ret = -ENOMEM;
				goto out;
			} else {
				res_info->resource = res;
			}
		}
	}

	/* look up named IRQs; missing ones are optional */
	for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
		irq_info = &dev->irq[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							irq_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, irq_info->name);
		} else {
			irq_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
					irq_info->num);
		}
	}

	/* look up named MSI IRQs; missing ones are optional */
	for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
		msi_info = &dev->msi[i];

		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
							msi_info->name);

		if (!res) {
			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
				dev->rc_idx, msi_info->name);
		} else {
			msi_info->num = res->start;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
					msi_info->num);
		}
	}

	/* All allocations succeeded */

	/* derive the wake IRQ from the wake GPIO, if one was found */
	if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
		dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
	else
		dev->wake_n = 0;

	/* convenience aliases into the mapped regions */
	dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
	dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
	dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
	dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
	dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
	dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
	dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
	dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
	dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
	dev->dev_io_res->flags = IORESOURCE_IO;

out:
	kfree(clkfreq);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
3654
3655static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
3656{
3657 dev->parf = NULL;
3658 dev->elbi = NULL;
3659 dev->dm_core = NULL;
3660 dev->conf = NULL;
3661 dev->bars = NULL;
3662 dev->tcsr = NULL;
3663 dev->dev_mem_res = NULL;
3664 dev->dev_io_res = NULL;
3665}
3666
/*
 * msm_pcie_enable() - power up and train the PCIe link for this RC.
 * @dev:     the root-complex instance
 * @options: PM_VREG / PM_CLK / PM_PIPE_CLK bits selecting which resource
 *           groups to turn on
 *
 * Sequence (order matters for the hardware): assert PERST, enable
 * regulators and clocks, put the controller in RC mode, program PARF
 * (interrupt mask, slave address space, write-halt), bring up the PHY
 * (shared across RCs, guarded by com_phy_lock), release PERST, start
 * LTSSM and poll up to ~100ms for link-up.
 *
 * Serialized against msm_pcie_disable() by dev->setup_lock.
 *
 * Return: 0 on success, negative errno (or -1 on link-training failure).
 */
int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
{
	int ret = 0;
	uint32_t val;
	long int retries = 0;
	int link_check_count = 0;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
		PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
			dev->rc_idx);
		goto out;
	}

	/* assert PCIe reset link to keep EP in reset */

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
				 PERST_PROPAGATION_DELAY_US_MAX);

	/* enable power */

	if (options & PM_VREG) {
		ret = msm_pcie_vreg_init(dev);
		if (ret)
			goto out;
	}

	/* enable clocks */
	if (options & PM_CLK) {
		ret = msm_pcie_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto clk_fail;
	}

	/* re-apply secure configuration via SCM after power-up, if needed */
	if (dev->scm_dev_id) {
		PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
		msm_pcie_restore_sec_config(dev);
	}

	/* configure PCIe to RC mode */
	msm_pcie_write_reg(dev->parf, PCIE20_PARF_DEVICE_TYPE, 0x4);

	/* enable l1 mode, clear bit 5 (REQ_NOT_ENTR_L1) */
	if (dev->l1_supported)
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);

	/* enable PCIe clocks and resets */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);

	/* change DBI base address */
	writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
				0, BIT(4));

	/* enable selected IRQ */
	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
		/* mask everything, then unmask only the events we handle */
		msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);

		msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
					BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
					BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
					BIT(MSM_PCIE_INT_EVT_AER_ERR) |
					BIT(MSM_PCIE_INT_EVT_MSI_0) |
					BIT(MSM_PCIE_INT_EVT_MSI_1) |
					BIT(MSM_PCIE_INT_EVT_MSI_2) |
					BIT(MSM_PCIE_INT_EVT_MSI_3) |
					BIT(MSM_PCIE_INT_EVT_MSI_4) |
					BIT(MSM_PCIE_INT_EVT_MSI_5) |
					BIT(MSM_PCIE_INT_EVT_MSI_6) |
					BIT(MSM_PCIE_INT_EVT_MSI_7));

		PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
	}

	/* size the slave address space to cover the BAR memory window */
	if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_16M)
		writel_relaxed(SZ_32M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_8M)
		writel_relaxed(SZ_16M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
	else
		writel_relaxed(SZ_8M, dev->parf +
			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);

	if (dev->use_msi) {
		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
		/* keep the existing halt size unless DT overrides it */
		val = dev->wr_halt_size ? dev->wr_halt_size :
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		msm_pcie_write_reg(dev->parf,
			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
			BIT(31) | val);

		PCIE_DBG(dev,
			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
			dev->rc_idx,
			readl_relaxed(dev->parf +
				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
	}

	/* the PHY is shared: only the first RC to come up initializes it */
	mutex_lock(&com_phy_lock);
	/* init PCIe PHY */
	if (!num_rc_on)
		pcie_phy_init(dev);

	num_rc_on++;
	mutex_unlock(&com_phy_lock);

	if (options & PM_PIPE_CLK) {
		usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
					 PHY_STABILIZATION_DELAY_US_MAX);
		/* Enable the pipe clock */
		ret = msm_pcie_pipe_clk_init(dev);
		/* ensure that changes propagated to the hardware */
		wmb();
		if (ret)
			goto link_fail;
	}

	PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);

	/* poll the PHY-ready status with a bounded retry count */
	do {
		if (pcie_phy_is_ready(dev))
			break;
		retries++;
		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
					 REFCLK_STABILIZATION_DELAY_US_MAX);
	} while (retries < PHY_READY_TIMEOUT_COUNT);

	PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
		dev->rc_idx, retries);

	if (pcie_phy_is_ready(dev))
		PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
	else {
		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
			dev->rc_idx);
		ret = -ENODEV;
		pcie_phy_dump(dev);
		goto link_fail;
	}

	pcie_pcs_port_phy_init(dev);

	/* optional DT-specified settle time (ms) before releasing the EP */
	if (dev->ep_latency)
		usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				dev->gpio[MSM_PCIE_GPIO_EP].on);

	/* de-assert PCIe reset link to bring EP out of reset */

	PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
		dev->rc_idx);
	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
	usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);

	/* set max tlp read size */
	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
				0x7000, dev->tlp_rd_size);

	/* enable link training */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));

	PCIE_DBG(dev, "%s", "check if link is up\n");

	/* Wait for up to 100ms for the link to come up */
	do {
		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
		val =  readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
		PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
			dev->rc_idx, (val >> 12) & 0x3f);
	} while ((!(val & XMLH_LINK_UP) ||
		!msm_pcie_confirm_linkup(dev, false, false, NULL))
		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));

	if ((val & XMLH_LINK_UP) &&
		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
		PCIE_DBG(dev, "Link is up after %d checkings\n",
			link_check_count);
		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
	} else {
		/* training failed: put the EP back in reset and bail */
		PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
			dev->rc_idx);
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
			dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
			dev->rc_idx);
		ret = -1;
		goto link_fail;
	}

	msm_pcie_config_controller(dev);

	/* use the internal MSI controller unless a GIC MSI address is set */
	if (!dev->msi_gicm_addr)
		msm_pcie_config_msi_controller(dev);

	msm_pcie_config_link_state(dev);

	/* re-apply per-device config after a resume of an enumerated bus */
	if (dev->enumerated)
		pci_walk_bus(dev->dev->bus, &msm_pcie_config_device, dev);

	dev->link_status = MSM_PCIE_LINK_ENABLED;
	dev->power_on = true;
	dev->suspending = false;
	dev->link_turned_on_counter++;

	goto out;

link_fail:
	/* unwind in reverse order: EP gpio, PHY, pipe clocks, clocks, vregs */
	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	msm_pcie_pipe_clk_deinit(dev);
	msm_pcie_clk_deinit(dev);
clk_fail:
	msm_pcie_vreg_deinit(dev);
out:
	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return ret;
}
3923
/*
 * msm_pcie_disable() - power down the PCIe link for this RC.
 * @dev:     the root-complex instance
 * @options: PM_VREG / PM_CLK / PM_PIPE_CLK bits selecting which resource
 *           groups to turn off
 *
 * Asserts PERST, resets and powers down the PHY (shutting down the
 * shared common PHY when the last RC goes down), then releases clocks,
 * regulators and the EP power gpio per @options.  No-op if the link is
 * already powered down.  Serialized with msm_pcie_enable() via
 * dev->setup_lock.
 */
void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
{
	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	mutex_lock(&dev->setup_lock);

	if (!dev->power_on) {
		PCIE_DBG(dev,
			"PCIe: the link of RC%d is already power down.\n",
			dev->rc_idx);
		mutex_unlock(&dev->setup_lock);
		return;
	}

	/* update bookkeeping before touching the hardware */
	dev->link_status = MSM_PCIE_LINK_DISABLED;
	dev->power_on = false;
	dev->link_turned_off_counter++;

	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
		dev->rc_idx);

	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);

	/* hold this port's PHY in reset and power it down */
	msm_pcie_write_reg(dev->phy,
		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
	msm_pcie_write_reg(dev->phy,
		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);

	/* the last RC down also powers off the shared common PHY */
	mutex_lock(&com_phy_lock);
	num_rc_on--;
	if (!num_rc_on && dev->common_phy) {
		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
			dev->rc_idx);
		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
	}
	mutex_unlock(&com_phy_lock);

	if (options & PM_CLK) {
		/* re-assert PHY_CTRL BIT(0) (cleared during enable) */
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
					BIT(0));
		msm_pcie_clk_deinit(dev);
	}

	if (options & PM_VREG)
		msm_pcie_vreg_deinit(dev);

	if (options & PM_PIPE_CLK)
		msm_pcie_pipe_clk_deinit(dev);

	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

	mutex_unlock(&dev->setup_lock);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
3983
3984static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
3985 struct msm_pcie_device_info *ep_dev_info)
3986{
3987 u32 val;
3988 void __iomem *ep_base = ep_dev_info->conf_base;
3989 u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
3990 0xff;
3991
3992 while (current_offset) {
3993 if (msm_pcie_check_align(dev, current_offset))
3994 return;
3995
3996 val = readl_relaxed(ep_base + current_offset);
3997 if ((val & 0xff) == PCIE20_CAP_ID) {
3998 ep_dev_info->dev_ctrlstts_offset =
3999 current_offset + 0x8;
4000 break;
4001 }
4002 current_offset = (val >> 8) & 0xff;
4003 }
4004
4005 if (!ep_dev_info->dev_ctrlstts_offset) {
4006 PCIE_DBG(dev,
4007 "RC%d endpoint does not support PCIe cap registers\n",
4008 dev->rc_idx);
4009 return;
4010 }
4011
4012 PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
4013 dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
4014
4015 /* Enable AER on EP */
4016 msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
4017 BIT(3)|BIT(2)|BIT(1)|BIT(0));
4018
4019 PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
4020 readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
4021}
4022
/*
 * msm_pcie_config_device_table() - pci_walk_bus() callback that records
 * a newly-enumerated device in both the global msm_pcie_dev_tbl and this
 * RC's pcidev_table, programs an iATU window for its config space,
 * enables bus-mastering for bridges, and enables AER on the device.
 *
 * @dev:  the struct device of the enumerated PCI function
 * @pdev: opaque pointer, actually the owning struct msm_pcie_dev_t
 *
 * Devices on bus 0 (the RC itself) are skipped.  Slots are matched by
 * BDF offset: entries are assumed to have been pre-populated with BDFs
 * and a NULL ->dev pointer before enumeration — TODO confirm against
 * the table-setup code, which is not visible in this chunk.
 *
 * Return: 0 on success; -ENODEV for a NULL pci_dev; the (positive)
 * table index when a table is full.
 */
static int msm_pcie_config_device_table(struct device *dev, void *pdev)
{
	struct pci_dev *pcidev = to_pci_dev(dev);
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
	int ret = 0;
	u32 rc_idx = pcie_dev->rc_idx;
	u32 i, index;
	u32 bdf = 0;
	u8 type;
	u32 h_type;
	u32 bme;

	if (!pcidev) {
		PCIE_ERR(pcie_dev,
			"PCIe: Did not find PCI device in list for RC%d.\n",
			pcie_dev->rc_idx);
		return -ENODEV;
	}

	PCIE_DBG(pcie_dev,
		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
		pcidev->vendor, pcidev->device);

	/* bus 0 is the root complex itself; nothing to record */
	if (!pcidev->bus->number)
		return ret;

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
	/* bus 1 is directly below the RC -> type-0 config; deeper -> type-1 */
	type = pcidev->bus->number == 1 ?
		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	/* find the matching (pre-populated, unclaimed) global table slot */
	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
		if (msm_pcie_dev_tbl[i].bdf == bdf &&
			!msm_pcie_dev_tbl[i].dev) {
			/* and the matching slot in this RC's local table */
			for (index = 0; index < MAX_DEVICE_NUM; index++) {
				if (dev_table_t[index].bdf == bdf) {
					msm_pcie_dev_tbl[i].dev = pcidev;
					msm_pcie_dev_tbl[i].domain = rc_idx;
					/* each device gets a 4K config slice */
					msm_pcie_dev_tbl[i].conf_base =
						pcie_dev->conf + index * SZ_4K;
					msm_pcie_dev_tbl[i].phy_address =
						axi_conf->start + index * SZ_4K;

					dev_table_t[index].dev = pcidev;
					dev_table_t[index].domain = rc_idx;
					dev_table_t[index].conf_base =
						pcie_dev->conf + index * SZ_4K;
					dev_table_t[index].phy_address =
						axi_conf->start + index * SZ_4K;

					/* map the slice to this BDF via iATU */
					msm_pcie_iatu_config(pcie_dev, index,
						type,
						dev_table_t[index].phy_address,
						dev_table_t[index].phy_address
						+ SZ_4K - 1,
						bdf);

					h_type = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_HEADER_TYPE);

					bme = readl_relaxed(
						dev_table_t[index].conf_base +
						PCIE20_COMMAND_STATUS);

					/*
					 * bridges (header type 1) get memory
					 * space + bus master enabled; endpoints
					 * are counted and await registration
					 */
					if (h_type & (1 << 16)) {
						pci_write_config_dword(pcidev,
							PCIE20_COMMAND_STATUS,
							bme | 0x06);
					} else {
						pcie_dev->num_ep++;
						dev_table_t[index].registered =
							false;
					}

					if (pcie_dev->num_ep > 1)
						pcie_dev->pending_ep_reg = true;

					msm_pcie_config_ep_aer(pcie_dev,
						&dev_table_t[index]);

					break;
				}
			}
			if (index == MAX_DEVICE_NUM) {
				PCIE_ERR(pcie_dev,
					"RC%d PCI device table is full.\n",
					rc_idx);
				ret = index;
			} else {
				break;
			}
		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
			pcidev == msm_pcie_dev_tbl[i].dev) {
			/* already recorded on a previous walk; nothing to do */
			break;
		}
	}
	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
		PCIE_ERR(pcie_dev,
			"Global PCI device table is full: %d elements.\n",
			i);
		PCIE_ERR(pcie_dev,
			"Bus number is 0x%x\nDevice number is 0x%x\n",
			pcidev->bus->number, pcidev->devfn);
		ret = i;
	}
	return ret;
}
4132
/*
 * msm_pcie_configure_sid() - assign an SMMU stream ID (SID) to an endpoint.
 * @dev: struct device of the PCIe endpoint client.
 * @sid: out parameter; receives the computed SMMU SID.
 * @domain: out parameter; receives the IOMMU domain index (= RC index).
 *
 * Walks up to the root bus to find the owning root complex, reserves the
 * next free "short BDF" slot, programs the matching PARF BDF-translation
 * register, and caches the SID in the RC's per-device table.
 *
 * Return: 0 on success, MSM_PCIE_ERROR on any failure.
 */
int msm_pcie_configure_sid(struct device *dev, u32 *sid, int *domain)
{
	struct pci_dev *pcidev;
	struct msm_pcie_dev_t *pcie_dev;
	struct pci_bus *bus;
	int i;
	u32 bdf;

	if (!dev) {
		pr_err("%s: PCIe: endpoint device passed in is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	pcidev = to_pci_dev(dev);
	if (!pcidev) {
		pr_err("%s: PCIe: PCI device of endpoint is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	bus = pcidev->bus;
	if (!bus) {
		pr_err("%s: PCIe: Bus of PCI device is NULL\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	/* The root bus's sysdata is the owning msm_pcie_dev_t. */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	pcie_dev = (struct msm_pcie_dev_t *)(bus->sysdata);
	if (!pcie_dev) {
		pr_err("%s: PCIe: Could not get PCIe structure\n",
			__func__);
		return MSM_PCIE_ERROR;
	}

	if (!pcie_dev->smmu_exist) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC:%d: smmu does not exist\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev, "PCIe: RC%d: device address is: %p\n",
		pcie_dev->rc_idx, dev);
	PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device address is: %p\n",
		pcie_dev->rc_idx, pcidev);

	*domain = pcie_dev->rc_idx;

	/* Reserve the next short-BDF slot; rolled back below if unused. */
	if (pcie_dev->current_short_bdf < (MAX_SHORT_BDF_NUM - 1)) {
		pcie_dev->current_short_bdf++;
	} else {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: No more short BDF left\n",
			pcie_dev->rc_idx);
		return MSM_PCIE_ERROR;
	}

	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);

	for (i = 0; i < MAX_DEVICE_NUM; i++) {
		if (pcie_dev->pcidev_table[i].bdf == bdf) {
			/* SID layout: base + (RC index << 4 | short BDF). */
			*sid = pcie_dev->smmu_sid_base +
				((pcie_dev->rc_idx << 4) |
				pcie_dev->current_short_bdf);

			/* Map the full BDF to the short BDF in PARF. */
			msm_pcie_write_reg(pcie_dev->parf,
				PCIE20_PARF_BDF_TRANSLATE_N +
				pcie_dev->current_short_bdf * 4,
				bdf >> 16);

			pcie_dev->pcidev_table[i].sid = *sid;
			pcie_dev->pcidev_table[i].short_bdf =
				pcie_dev->current_short_bdf;
			break;
		}
	}

	if (i == MAX_DEVICE_NUM) {
		/* Endpoint not in device table: release the reserved slot. */
		pcie_dev->current_short_bdf--;
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d could not find BDF:%d\n",
			pcie_dev->rc_idx, bdf);
		return MSM_PCIE_ERROR;
	}

	PCIE_DBG(pcie_dev,
		"PCIe: RC%d: Device: %02x:%02x.%01x received SID %d\n",
		pcie_dev->rc_idx,
		bdf >> 24,
		bdf >> 19 & 0x1f,
		bdf >> 16 & 0x07,
		*sid);

	return 0;
}
EXPORT_SYMBOL(msm_pcie_configure_sid);
4233
4234int msm_pcie_enumerate(u32 rc_idx)
4235{
4236 int ret = 0, bus_ret = 0, scan_ret = 0;
4237 struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
4238
4239 mutex_lock(&dev->enumerate_lock);
4240
4241 PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
4242
4243 if (!dev->drv_ready) {
4244 PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
4245 rc_idx);
4246 ret = -EPROBE_DEFER;
4247 goto out;
4248 }
4249
4250 if (!dev->enumerated) {
4251 ret = msm_pcie_enable(dev, PM_ALL);
4252
4253 /* kick start ARM PCI configuration framework */
4254 if (!ret) {
4255 struct pci_dev *pcidev = NULL;
4256 bool found = false;
4257 struct pci_bus *bus;
4258 resource_size_t iobase = 0;
4259 u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
4260 u32 vendor_id = ids & 0xffff;
4261 u32 device_id = (ids & 0xffff0000) >> 16;
4262 LIST_HEAD(res);
4263
4264 PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
4265 vendor_id, device_id);
4266
4267 ret = of_pci_get_host_bridge_resources(
4268 dev->pdev->dev.of_node,
4269 0, 0xff, &res, &iobase);
4270 if (ret) {
4271 PCIE_ERR(dev,
4272 "PCIe: failed to get host bridge resources for RC%d: %d\n",
4273 dev->rc_idx, ret);
4274 goto out;
4275 }
4276
4277 bus = pci_create_root_bus(&dev->pdev->dev, 0,
4278 &msm_pcie_ops,
4279 msm_pcie_setup_sys_data(dev),
4280 &res);
4281 if (!bus) {
4282 PCIE_ERR(dev,
4283 "PCIe: failed to create root bus for RC%d\n",
4284 dev->rc_idx);
4285 ret = -ENOMEM;
4286 goto out;
4287 }
4288
4289 scan_ret = pci_scan_child_bus(bus);
4290 PCIE_DBG(dev,
4291 "PCIe: RC%d: The max subordinate bus number discovered is %d\n",
4292 dev->rc_idx, ret);
4293
4294 msm_pcie_fixup_irqs(dev);
4295 pci_assign_unassigned_bus_resources(bus);
4296 pci_bus_add_devices(bus);
4297
4298 dev->enumerated = true;
4299
4300 msm_pcie_write_mask(dev->dm_core +
4301 PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
4302
4303 if (dev->cpl_timeout && dev->bridge_found)
4304 msm_pcie_write_reg_field(dev->dm_core,
4305 PCIE20_DEVICE_CONTROL2_STATUS2,
4306 0xf, dev->cpl_timeout);
4307
4308 if (dev->shadow_en) {
4309 u32 val = readl_relaxed(dev->dm_core +
4310 PCIE20_COMMAND_STATUS);
4311 PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
4312 val);
4313 dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
4314 }
4315
4316 do {
4317 pcidev = pci_get_device(vendor_id,
4318 device_id, pcidev);
4319 if (pcidev && (&msm_pcie_dev[rc_idx] ==
4320 (struct msm_pcie_dev_t *)
4321 PCIE_BUS_PRIV_DATA(pcidev->bus))) {
4322 msm_pcie_dev[rc_idx].dev = pcidev;
4323 found = true;
4324 PCIE_DBG(&msm_pcie_dev[rc_idx],
4325 "PCI device is found for RC%d\n",
4326 rc_idx);
4327 }
4328 } while (!found && pcidev);
4329
4330 if (!pcidev) {
4331 PCIE_ERR(dev,
4332 "PCIe: Did not find PCI device for RC%d.\n",
4333 dev->rc_idx);
4334 ret = -ENODEV;
4335 goto out;
4336 }
4337
4338 bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
4339 &msm_pcie_config_device_table);
4340
4341 if (bus_ret) {
4342 PCIE_ERR(dev,
4343 "PCIe: Failed to set up device table for RC%d\n",
4344 dev->rc_idx);
4345 ret = -ENODEV;
4346 goto out;
4347 }
4348 } else {
4349 PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
4350 dev->rc_idx);
4351 }
4352 } else {
4353 PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
4354 dev->rc_idx);
4355 }
4356
4357out:
4358 mutex_unlock(&dev->enumerate_lock);
4359
4360 return ret;
4361}
4362EXPORT_SYMBOL(msm_pcie_enumerate);
4363
4364static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
4365 enum msm_pcie_event event)
4366{
4367 if (dev->event_reg && dev->event_reg->callback &&
4368 (dev->event_reg->events & event)) {
4369 struct msm_pcie_notify *notify = &dev->event_reg->notify;
4370
4371 notify->event = event;
4372 notify->user = dev->event_reg->user;
4373 PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
4374 dev->rc_idx, event);
4375 dev->event_reg->callback(notify);
4376
4377 if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
4378 (event == MSM_PCIE_EVENT_LINKDOWN)) {
4379 dev->user_suspend = true;
4380 PCIE_DBG(dev,
4381 "PCIe: Client of RC%d will recover the link later.\n",
4382 dev->rc_idx);
4383 return;
4384 }
4385 } else {
4386 PCIE_DBG2(dev,
4387 "PCIe: Client of RC%d does not have registration for event %d\n",
4388 dev->rc_idx, event);
4389 }
4390}
4391
/*
 * handle_wake_func() - deferred work for a WAKE# interrupt.
 *
 * Scheduled from handle_wake_irq() when WAKE# fires before the RC has been
 * enumerated.  Performs the enumeration and, on success, delivers
 * MSM_PCIE_EVENT_LINKUP callbacks to registered clients (one per endpoint
 * table entry when more than one EP is present).
 */
static void handle_wake_func(struct work_struct *work)
{
	int i, ret;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
					handle_wake_work);

	PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);

	mutex_lock(&dev->recovery_lock);

	if (!dev->enumerated) {
		PCIE_DBG(dev,
			"PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
			dev->rc_idx);

		ret = msm_pcie_enumerate(dev->rc_idx);
		if (ret) {
			PCIE_ERR(dev,
				"PCIe: failed to enable RC%d upon wake request from the device.\n",
				dev->rc_idx);
			goto out;
		}

		if (dev->num_ep > 1) {
			/*
			 * Multi-EP: notify each endpoint's registrant.
			 * NOTE(review): dev->event_reg is left pointing at
			 * the last table entry after this loop.
			 */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg = dev->pcidev_table[i].event_reg;

				if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
					&& dev->event_reg &&
					dev->event_reg->callback &&
					(dev->event_reg->events &
					MSM_PCIE_EVENT_LINKUP)) {
					struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
					notify->event = MSM_PCIE_EVENT_LINKUP;
					notify->user = dev->event_reg->user;
					PCIE_DBG(dev,
						"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
						dev->rc_idx);
					dev->event_reg->callback(notify);
				}
			}
		} else {
			/* Single EP: use the RC-wide registration. */
			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
				dev->event_reg && dev->event_reg->callback &&
				(dev->event_reg->events &
				MSM_PCIE_EVENT_LINKUP)) {
				struct msm_pcie_notify *notify =
						&dev->event_reg->notify;
				notify->event = MSM_PCIE_EVENT_LINKUP;
				notify->user = dev->event_reg->user;
				PCIE_DBG(dev,
					"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
					dev->rc_idx);
				dev->event_reg->callback(notify);
			} else {
				PCIE_DBG(dev,
					"PCIe: Client of RC%d does not have registration for linkup event.\n",
					dev->rc_idx);
			}
		}
		goto out;
	} else {
		PCIE_ERR(dev,
			"PCIe: The enumeration for RC%d has already been done.\n",
			dev->rc_idx);
		goto out;
	}

out:
	mutex_unlock(&dev->recovery_lock);
}
4464
/*
 * handle_aer_irq() - AER interrupt handler for a root complex.
 *
 * Reads the RC's correctable/uncorrectable/root error status and its
 * device-status error bits, bumps the per-RC error counters, then walks
 * the AER error source register to locate the reporting endpoint(s) and
 * repeats the accounting for them.  All latched status bits are cleared
 * (write-1-to-clear) before returning.  Correctable-error logging is
 * throttled via corr_counter_limit.
 */
static irqreturn_t handle_aer_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;

	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
	int ep_corr_val = 0, ep_uncorr_val = 0;
	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
	u32 ep_dev_ctrlstts_offset = 0;
	int i, j, ep_src_bdf = 0;
	void __iomem *ep_base = NULL;
	unsigned long irqsave_flags;

	PCIE_DBG2(dev,
		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
		dev->rc_idx, irq, dev->rc_corr_counter,
		dev->rc_non_fatal_counter, dev->rc_fatal_counter,
		dev->ep_corr_counter, dev->ep_non_fatal_counter,
		dev->ep_fatal_counter);

	spin_lock_irqsave(&dev->aer_lock, irqsave_flags);

	/* Registers may already be inaccessible during suspend. */
	if (dev->suspending) {
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently suspending.\n",
			dev->rc_idx);
		spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
		return IRQ_HANDLED;
	}

	uncorr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_UNCORR_ERR_STATUS_REG);
	corr_val = readl_relaxed(dev->dm_core +
				PCIE20_AER_CORR_ERR_STATUS_REG);
	rc_err_status = readl_relaxed(dev->dm_core +
				PCIE20_AER_ROOT_ERR_STATUS_REG);
	rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
				PCIE20_CAP_DEVCTRLSTATUS);

	if (uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				uncorr_val);
	if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
		PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				corr_val);

	/* Device-status bits 16/17/18 feed corr/non-fatal/fatal counters. */
	if ((rc_dev_ctrlstts >> 18) & 0x1)
		dev->rc_fatal_counter++;
	if ((rc_dev_ctrlstts >> 17) & 0x1)
		dev->rc_non_fatal_counter++;
	if ((rc_dev_ctrlstts >> 16) & 0x1)
		dev->rc_corr_counter++;

	/* Clear the RC's detected-error bits (write-1-to-clear). */
	msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
				BIT(18)|BIT(17)|BIT(16));

	if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
		PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
		goto out;
	}

	/*
	 * Pass i=0 uses the low half of the source-ID register, pass i=1
	 * the high half (logged as Correctable / Uncorrectable below).
	 */
	for (i = 0; i < 2; i++) {
		if (i)
			ep_src_bdf = readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
		else
			ep_src_bdf = (readl_relaxed(dev->dm_core +
				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;

		if (!ep_src_bdf)
			continue;

		/* Match the reporting BDF against the RC's device table. */
		for (j = 0; j < MAX_DEVICE_NUM; j++) {
			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
				PCIE_DBG2(dev,
					"PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
					i ? "Uncorrectable" : "Correctable",
					dev->pcidev_table[j].bdf >> 24,
					dev->pcidev_table[j].bdf >> 19 & 0x1f,
					dev->pcidev_table[j].bdf >> 16 & 0x07);
				ep_base = dev->pcidev_table[j].conf_base;
				ep_dev_ctrlstts_offset = dev->
					pcidev_table[j].dev_ctrlstts_offset;
				break;
			}
		}

		if (!ep_base) {
			PCIE_ERR(dev,
				"PCIe: RC%d no endpoint found for reported error\n",
				dev->rc_idx);
			goto out;
		}

		ep_uncorr_val = readl_relaxed(ep_base +
					PCIE20_AER_UNCORR_ERR_STATUS_REG);
		ep_corr_val = readl_relaxed(ep_base +
					PCIE20_AER_CORR_ERR_STATUS_REG);
		ep_dev_ctrlstts = readl_relaxed(ep_base +
					ep_dev_ctrlstts_offset);

		if (ep_uncorr_val)
			PCIE_DBG(dev,
				"EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
				ep_uncorr_val);
		if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
			PCIE_DBG(dev,
				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
				ep_corr_val);

		if ((ep_dev_ctrlstts >> 18) & 0x1)
			dev->ep_fatal_counter++;
		if ((ep_dev_ctrlstts >> 17) & 0x1)
			dev->ep_non_fatal_counter++;
		if ((ep_dev_ctrlstts >> 16) & 0x1)
			dev->ep_corr_counter++;

		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
					BIT(18)|BIT(17)|BIT(16));

		/* Clear the endpoint's latched AER status. */
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_UNCORR_ERR_STATUS_REG,
				0x3fff031, 0x3fff031);
		msm_pcie_write_reg_field(ep_base,
				PCIE20_AER_CORR_ERR_STATUS_REG,
				0xf1c1, 0xf1c1);
	}
out:
	if (((dev->rc_corr_counter < corr_counter_limit) &&
		(dev->ep_corr_counter < corr_counter_limit)) ||
		uncorr_val || ep_uncorr_val)
		PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
				rc_err_status);
	/* Clear the RC's latched AER and root error status. */
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_UNCORR_ERR_STATUS_REG,
			0x3fff031, 0x3fff031);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_CORR_ERR_STATUS_REG,
			0xf1c1, 0xf1c1);
	msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_AER_ROOT_ERR_STATUS_REG,
			0x7f, 0x7f);

	spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
	return IRQ_HANDLED;
}
4610
4611static irqreturn_t handle_wake_irq(int irq, void *data)
4612{
4613 struct msm_pcie_dev_t *dev = data;
4614 unsigned long irqsave_flags;
4615 int i;
4616
4617 spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);
4618
4619 dev->wake_counter++;
4620 PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
4621 dev->wake_counter, dev->rc_idx);
4622
4623 PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
4624 dev->rc_idx);
4625
Tony Truong9f2c7722017-02-28 15:02:27 -08004626 if (!dev->enumerated && !(dev->boot_option &
4627 MSM_PCIE_NO_WAKE_ENUMERATION)) {
4628 PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
4629 schedule_work(&dev->handle_wake_work);
Tony Truong349ee492014-10-01 17:35:56 -07004630 } else {
4631 PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
4632 __pm_stay_awake(&dev->ws);
4633 __pm_relax(&dev->ws);
4634
4635 if (dev->num_ep > 1) {
4636 for (i = 0; i < MAX_DEVICE_NUM; i++) {
4637 dev->event_reg =
4638 dev->pcidev_table[i].event_reg;
4639 msm_pcie_notify_client(dev,
4640 MSM_PCIE_EVENT_WAKEUP);
4641 }
4642 } else {
4643 msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
4644 }
4645 }
4646
4647 spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);
4648
4649 return IRQ_HANDLED;
4650}
4651
/*
 * handle_linkdown_irq() - handle a link-down event for a root complex.
 *
 * Marks the link disabled, stops register shadowing, optionally panics
 * (debug knob linkdown_panic), asserts PERST toward the endpoint, and
 * notifies registered clients (per endpoint when more than one EP is
 * present).  Events arriving while the link is already down or while the
 * RC is suspending are only logged.
 */
static irqreturn_t handle_linkdown_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;
	int i;

	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);

	dev->linkdown_counter++;

	PCIE_DBG(dev,
		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
		dev->linkdown_counter, dev->rc_idx);

	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG(dev,
			"PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
			dev->rc_idx);
	} else if (dev->suspending) {
		PCIE_DBG(dev,
			"PCIe:the link of RC%d is suspending.\n",
			dev->rc_idx);
	} else {
		dev->link_status = MSM_PCIE_LINK_DISABLED;
		dev->shadow_en = false;

		if (dev->linkdown_panic)
			panic("User has chosen to panic on linkdown\n");

		/* assert PERST */
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);
		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

		if (dev->num_ep > 1) {
			/* NOTE(review): event_reg is repointed per EP. */
			for (i = 0; i < MAX_DEVICE_NUM; i++) {
				dev->event_reg =
					dev->pcidev_table[i].event_reg;
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_LINKDOWN);
			}
		} else {
			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
		}
	}

	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);

	return IRQ_HANDLED;
}
4702
/*
 * handle_msi_irq() - demux the PCIe core's internal MSI interrupt.
 *
 * Scans every MSI controller status bank, acks each set bit by writing
 * it back (write-1-to-clear), and dispatches the matching virq from the
 * RC's irq domain.  Status is re-read until no bits remain so MSIs that
 * arrive during handling are not lost.
 */
static irqreturn_t handle_msi_irq(int irq, void *data)
{
	int i, j;
	unsigned long val;
	struct msm_pcie_dev_t *dev = data;
	void __iomem *ctrl_status;

	PCIE_DUMP(dev, "irq: %d\n", irq);

	/*
	 * check for set bits, clear it by setting that bit
	 * and trigger corresponding irq
	 */
	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
		/* Per-bank status registers are spaced 12 bytes apart. */
		ctrl_status = dev->dm_core +
				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);

		val = readl_relaxed(ctrl_status);
		while (val) {
			j = find_first_bit(&val, 32);
			writel_relaxed(BIT(j), ctrl_status);
			/* ensure that interrupt is cleared (acked) */
			wmb();
			/* Each bank carries 32 vectors: virq = j + 32*i. */
			generic_handle_irq(
			   irq_find_mapping(dev->irq_domain, (j + (32*i)))
			   );
			val = readl_relaxed(ctrl_status);
		}
	}

	return IRQ_HANDLED;
}
4735
4736static irqreturn_t handle_global_irq(int irq, void *data)
4737{
4738 int i;
4739 struct msm_pcie_dev_t *dev = data;
4740 unsigned long irqsave_flags;
4741 u32 status = 0;
4742
4743 spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);
4744
4745 status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
4746 readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);
4747
4748 msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);
4749
4750 PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
4751 dev->rc_idx, irq, status);
4752
4753 for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
4754 if (status & BIT(i)) {
4755 switch (i) {
4756 case MSM_PCIE_INT_EVT_LINK_DOWN:
4757 PCIE_DBG(dev,
4758 "PCIe: RC%d: handle linkdown event.\n",
4759 dev->rc_idx);
4760 handle_linkdown_irq(irq, data);
4761 break;
4762 case MSM_PCIE_INT_EVT_AER_LEGACY:
4763 PCIE_DBG(dev,
4764 "PCIe: RC%d: AER legacy event.\n",
4765 dev->rc_idx);
4766 handle_aer_irq(irq, data);
4767 break;
4768 case MSM_PCIE_INT_EVT_AER_ERR:
4769 PCIE_DBG(dev,
4770 "PCIe: RC%d: AER event.\n",
4771 dev->rc_idx);
4772 handle_aer_irq(irq, data);
4773 break;
4774 default:
Tony Truong3f110d42017-04-07 17:12:23 -07004775 PCIE_DUMP(dev,
Tony Truong349ee492014-10-01 17:35:56 -07004776 "PCIe: RC%d: Unexpected event %d is caught!\n",
4777 dev->rc_idx, i);
4778 }
4779 }
4780 }
4781
4782 spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);
4783
4784 return IRQ_HANDLED;
4785}
4786
Tony Truong52122a62017-03-23 18:00:34 -07004787static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
4788 struct pci_dev *pdev)
4789{
4790 struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
4791 int bypass_en = 0;
4792
4793 if (!domain) {
4794 PCIE_DBG(dev,
4795 "PCIe: RC%d: client does not have an iommu domain\n",
4796 dev->rc_idx);
4797 return;
4798 }
4799
4800 iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
4801 if (!bypass_en) {
4802 int ret;
4803 phys_addr_t pcie_base_addr =
4804 dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
4805 dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);
4806
4807 ret = iommu_unmap(domain, iova, PAGE_SIZE);
4808 if (ret != PAGE_SIZE)
4809 PCIE_ERR(dev,
4810 "PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
4811 dev->rc_idx, ret);
4812 }
4813}
4814
/*
 * msm_pcie_destroy_irq() - free one MSI virq and its bookkeeping.
 * @irq: virq being torn down.
 *
 * Resolves the owning RC and MSI slot from the virq's chip data, handles
 * both QGIC-routed MSIs (contiguous virq range starting at the first
 * descriptor's irq) and the default PCIe MSI block, unmaps the QGIC
 * doorbell when the last vector of a group is released, and clears the
 * slot's in-use bit.
 */
void msm_pcie_destroy_irq(unsigned int irq)
{
	int pos;
	struct pci_dev *pdev = irq_get_chip_data(irq);
	struct msi_desc *entry = irq_get_msi_desc(irq);
	struct msi_desc *firstentry;
	struct msm_pcie_dev_t *dev;
	u32 nvec;
	int firstirq;

	if (!pdev) {
		pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
		return;
	}

	dev = PCIE_BUS_PRIV_DATA(pdev->bus);
	if (!dev) {
		pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
		return;
	}

	if (!entry) {
		PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
			dev->rc_idx, irq);
		return;
	}

	firstentry = first_pci_msi_entry(pdev);
	if (!firstentry) {
		PCIE_ERR(dev,
			"PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
			dev->rc_idx, irq);
		return;
	}

	firstirq = firstentry->irq;
	nvec = (1 << entry->msi_attrib.multiple);

	if (dev->msi_gicm_addr) {
		PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);

		/* QGIC vectors are contiguous, starting at firstirq. */
		if (irq < firstirq || irq > firstirq + nvec - 1) {
			PCIE_ERR(dev,
				"Could not find irq: %d in RC%d MSI table\n",
				irq, dev->rc_idx);
			return;
		}
		/* Unmap the QGIC doorbell when the last vector goes away. */
		if (irq == firstirq + nvec - 1)
			msm_pcie_unmap_qgic_addr(dev, pdev);
		pos = irq - firstirq;
	} else {
		PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
		pos = irq - irq_find_mapping(dev->irq_domain, 0);
	}

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
		pos, *dev->msi_irq_in_use);
	clear_bit(pos, dev->msi_irq_in_use);
	PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
		pos, *dev->msi_irq_in_use);
}
4878
/*
 * Hookup to the Linux PCI MSI framework: tear down one MSI virq that
 * was set up by this driver.
 */
void arch_teardown_msi_irq(unsigned int irq)
{
	PCIE_GEN_DBG("irq %d deallocated\n", irq);
	msm_pcie_destroy_irq(irq);
}
4885
4886void arch_teardown_msi_irqs(struct pci_dev *dev)
4887{
4888 struct msi_desc *entry;
4889 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
4890
4891 PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
4892 pcie_dev->rc_idx, dev->vendor, dev->device);
4893
4894 pcie_dev->use_msi = false;
4895
4896 list_for_each_entry(entry, &dev->dev.msi_list, list) {
4897 int i, nvec;
4898
4899 if (entry->irq == 0)
4900 continue;
4901 nvec = 1 << entry->msi_attrib.multiple;
4902 for (i = 0; i < nvec; i++)
Tony Truongc3c52ae2017-03-29 12:16:51 -07004903 arch_teardown_msi_irq(entry->irq + i);
Tony Truong349ee492014-10-01 17:35:56 -07004904 }
4905}
4906
/* No-op irq_ack callback for the MSI irq_chip below. */
static void msm_pcie_msi_nop(struct irq_data *d)
{
}
4910
/* irq_chip for MSIs delivered through the PCIe core's own MSI block. */
static struct irq_chip pcie_msi_chip = {
	.name = "msm-pcie-msi",
	.irq_ack = msm_pcie_msi_nop,
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
4919
4920static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
4921{
4922 int irq, pos;
4923
4924 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4925
4926again:
4927 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
4928
4929 if (pos >= PCIE_MSI_NR_IRQS)
4930 return -ENOSPC;
4931
4932 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
4933
4934 if (test_and_set_bit(pos, dev->msi_irq_in_use))
4935 goto again;
4936 else
4937 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
4938
4939 irq = irq_create_mapping(dev->irq_domain, pos);
4940 if (!irq)
4941 return -EINVAL;
4942
4943 return irq;
4944}
4945
4946static int arch_setup_msi_irq_default(struct pci_dev *pdev,
4947 struct msi_desc *desc, int nvec)
4948{
4949 int irq;
4950 struct msi_msg msg;
4951 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
4952
4953 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4954
4955 irq = msm_pcie_create_irq(dev);
4956
4957 PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);
4958
4959 if (irq < 0)
4960 return irq;
4961
4962 PCIE_DBG(dev, "irq %d allocated\n", irq);
4963
Tony Truongc3c52ae2017-03-29 12:16:51 -07004964 irq_set_chip_data(irq, pdev);
Tony Truong349ee492014-10-01 17:35:56 -07004965 irq_set_msi_desc(irq, desc);
4966
4967 /* write msi vector and data */
4968 msg.address_hi = 0;
4969 msg.address_lo = MSM_PCIE_MSI_PHY;
4970 msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
4971 write_msi_msg(irq, &msg);
4972
4973 return 0;
4974}
4975
4976static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
4977{
4978 int irq, pos;
4979
4980 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
4981
4982again:
4983 pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
4984
4985 if (pos >= PCIE_MSI_NR_IRQS)
4986 return -ENOSPC;
4987
4988 PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
4989
4990 if (test_and_set_bit(pos, dev->msi_irq_in_use))
4991 goto again;
4992 else
4993 PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
4994
4995 if (pos >= MSM_PCIE_MAX_MSI) {
4996 PCIE_ERR(dev,
4997 "PCIe: RC%d: pos %d is not less than %d\n",
4998 dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
4999 return MSM_PCIE_ERROR;
5000 }
5001
5002 irq = dev->msi[pos].num;
5003 if (!irq) {
5004 PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
5005 dev->rc_idx);
5006 return -EINVAL;
5007 }
5008
5009 return irq;
5010}
5011
/*
 * msm_pcie_map_qgic_addr() - compose the MSI doorbell address for QGIC.
 * @dev: root complex.
 * @pdev: endpoint the MSI belongs to.
 * @msg: MSI message whose address fields are filled in here.
 *
 * Defaults the address to the physical QGIC doorbell.  When the client
 * has an IOMMU domain with stage-1 translation enabled, the doorbell
 * page is mapped into the client's IOVA space and the IOVA (plus the
 * doorbell's offset within its page) is written instead.
 *
 * Return: 0 on success, -ENOMEM when the IOMMU mapping fails.
 */
static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
					struct pci_dev *pdev,
					struct msi_msg *msg)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
	struct iommu_domain_geometry geometry;
	int ret, fastmap_en = 0, bypass_en = 0;
	dma_addr_t iova;
	phys_addr_t gicm_db_offset;

	msg->address_hi = 0;
	msg->address_lo = dev->msi_gicm_addr;

	if (!domain) {
		PCIE_DBG(dev,
			"PCIe: RC%d: client does not have an iommu domain\n",
			dev->rc_idx);
		return 0;
	}

	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);

	PCIE_DBG(dev,
		"PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n",
		dev->rc_idx, bypass_en ? "bypass" : "enabled",
		pdev->bus->number, pdev->devfn);

	/* With stage-1 bypass the physical doorbell address works as-is. */
	if (bypass_en)
		return 0;

	iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap_en);
	if (fastmap_en) {
		/* Fastmap: place the mapping at the aperture start. */
		iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geometry);
		iova = geometry.aperture_start;
		PCIE_DBG(dev,
			"PCIe: RC%d: Use client's IOVA 0x%llx to map QGIC MSI address\n",
			dev->rc_idx, iova);
	} else {
		phys_addr_t pcie_base_addr;

		/*
		 * Use PCIe DBI address as the IOVA since client cannot
		 * use this address for their IOMMU mapping. This will
		 * prevent any conflicts between PCIe host and
		 * client's mapping.
		 */
		pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
		iova = rounddown(pcie_base_addr, PAGE_SIZE);
	}

	ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
	if (ret < 0) {
		PCIE_ERR(dev,
			"PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
			dev->rc_idx, ret);
		return -ENOMEM;
	}

	/* Preserve the doorbell's offset within its page. */
	gicm_db_offset = dev->msi_gicm_addr -
		rounddown(dev->msi_gicm_addr, PAGE_SIZE);
	msg->address_lo = iova + gicm_db_offset;

	return 0;
}
5077
/*
 * arch_setup_msi_irq_qgic() - set up nvec QGIC-routed MSI vectors.
 *
 * Allocates nvec QGIC interrupts, attaches the msi_desc to the first one,
 * and programs the doorbell address/data into the endpoint (data is the
 * first vector's offset within the QGIC MSI range plus msi_gicm_base).
 *
 * NOTE(review): vectors already allocated are not released when a later
 * allocation or the QGIC address mapping fails — confirm callers tolerate
 * this leak on the error paths.
 */
static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
		struct msi_desc *desc, int nvec)
{
	int irq, index, ret, firstirq = 0;
	struct msi_msg msg;
	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);

	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	for (index = 0; index < nvec; index++) {
		irq = msm_pcie_create_irq_qgic(dev);
		PCIE_DBG(dev, "irq %d is allocated\n", irq);

		if (irq < 0)
			return irq;

		if (index == 0)
			firstirq = irq;

		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
		irq_set_chip_data(irq, pdev);
	}

	/* write msi vector and data */
	irq_set_msi_desc(firstirq, desc);

	ret = msm_pcie_map_qgic_addr(dev, pdev, &msg);
	if (ret)
		return ret;

	msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
	write_msi_msg(firstirq, &msg);

	return 0;
}
5113
5114int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
5115{
5116 struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
5117
5118 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5119
5120 if (dev->msi_gicm_addr)
5121 return arch_setup_msi_irq_qgic(pdev, desc, 1);
5122 else
5123 return arch_setup_msi_irq_default(pdev, desc, 1);
5124}
5125
/*
 * Compute the MSI "multiple message" exponent: floor(log2(nvec)),
 * i.e. how many right shifts reduce nvec to zero, minus one.
 */
static int msm_pcie_get_msi_multiple(int nvec)
{
	int log2_nvec = -1;

	for (; nvec; nvec >>= 1)
		log2_nvec++;

	PCIE_GEN_DBG("log2 number of MSI multiple:%d\n",
		log2_nvec);

	return log2_nvec;
}
5139
/*
 * arch_setup_msi_irqs() - multi-vector MSI setup entry point.
 * @dev: endpoint requesting MSIs.
 * @nvec: number of vectors requested (at most 32).
 * @type: only PCI_CAP_ID_MSI is supported here.
 *
 * Return: 0 on success, -ENOSPC when the request cannot be met, or a
 * negative errno from the per-descriptor setup.
 */
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);

	if (type != PCI_CAP_ID_MSI || nvec > 32)
		return -ENOSPC;

	PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);

	list_for_each_entry(entry, &dev->dev.msi_list, list) {
		entry->msi_attrib.multiple =
			msm_pcie_get_msi_multiple(nvec);

		/* QGIC-routed MSIs vs. the PCIe core's own MSI block. */
		if (pcie_dev->msi_gicm_addr)
			ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
		else
			ret = arch_setup_msi_irq_default(dev, entry, nvec);

		PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);

		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	pcie_dev->use_msi = true;

	return 0;
}
5174
5175static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
5176 irq_hw_number_t hwirq)
5177{
5178 irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
Tony Truong349ee492014-10-01 17:35:56 -07005179 return 0;
5180}
5181
/* Ops for the RC's virtual MSI irq domain (only .map is needed). */
static const struct irq_domain_ops msm_pcie_msi_ops = {
	.map = msm_pcie_msi_map,
};
5185
5186int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
5187{
5188 int rc;
5189 int msi_start = 0;
5190 struct device *pdev = &dev->pdev->dev;
5191
5192 PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
5193
5194 if (dev->rc_idx)
5195 wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
5196 else
5197 wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
5198
5199 /* register handler for linkdown interrupt */
5200 if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
5201 rc = devm_request_irq(pdev,
5202 dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
5203 handle_linkdown_irq,
5204 IRQF_TRIGGER_RISING,
5205 dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
5206 dev);
5207 if (rc) {
5208 PCIE_ERR(dev,
5209 "PCIe: Unable to request linkdown interrupt:%d\n",
5210 dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
5211 return rc;
5212 }
5213 }
5214
5215 /* register handler for physical MSI interrupt line */
5216 if (dev->irq[MSM_PCIE_INT_MSI].num) {
5217 rc = devm_request_irq(pdev,
5218 dev->irq[MSM_PCIE_INT_MSI].num,
5219 handle_msi_irq,
5220 IRQF_TRIGGER_RISING,
5221 dev->irq[MSM_PCIE_INT_MSI].name,
5222 dev);
5223 if (rc) {
5224 PCIE_ERR(dev,
5225 "PCIe: RC%d: Unable to request MSI interrupt\n",
5226 dev->rc_idx);
5227 return rc;
5228 }
5229 }
5230
5231 /* register handler for AER interrupt */
5232 if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
5233 rc = devm_request_irq(pdev,
5234 dev->irq[MSM_PCIE_INT_PLS_ERR].num,
5235 handle_aer_irq,
5236 IRQF_TRIGGER_RISING,
5237 dev->irq[MSM_PCIE_INT_PLS_ERR].name,
5238 dev);
5239 if (rc) {
5240 PCIE_ERR(dev,
5241 "PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
5242 dev->rc_idx,
5243 dev->irq[MSM_PCIE_INT_PLS_ERR].num);
5244 return rc;
5245 }
5246 }
5247
5248 /* register handler for AER legacy interrupt */
5249 if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
5250 rc = devm_request_irq(pdev,
5251 dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
5252 handle_aer_irq,
5253 IRQF_TRIGGER_RISING,
5254 dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
5255 dev);
5256 if (rc) {
5257 PCIE_ERR(dev,
5258 "PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
5259 dev->rc_idx,
5260 dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
5261 return rc;
5262 }
5263 }
5264
5265 if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
5266 rc = devm_request_irq(pdev,
5267 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
5268 handle_global_irq,
5269 IRQF_TRIGGER_RISING,
5270 dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
5271 dev);
5272 if (rc) {
5273 PCIE_ERR(dev,
5274 "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
5275 dev->rc_idx,
5276 dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
5277 return rc;
5278 }
5279 }
5280
5281 /* register handler for PCIE_WAKE_N interrupt line */
5282 if (dev->wake_n) {
5283 rc = devm_request_irq(pdev,
5284 dev->wake_n, handle_wake_irq,
5285 IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
5286 if (rc) {
5287 PCIE_ERR(dev,
5288 "PCIe: RC%d: Unable to request wake interrupt\n",
5289 dev->rc_idx);
5290 return rc;
5291 }
5292
5293 INIT_WORK(&dev->handle_wake_work, handle_wake_func);
5294
5295 rc = enable_irq_wake(dev->wake_n);
5296 if (rc) {
5297 PCIE_ERR(dev,
5298 "PCIe: RC%d: Unable to enable wake interrupt\n",
5299 dev->rc_idx);
5300 return rc;
5301 }
5302 }
5303
5304 /* Create a virtual domain of interrupts */
5305 if (!dev->msi_gicm_addr) {
5306 dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
5307 PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
5308
5309 if (!dev->irq_domain) {
5310 PCIE_ERR(dev,
5311 "PCIe: RC%d: Unable to initialize irq domain\n",
5312 dev->rc_idx);
5313
5314 if (dev->wake_n)
5315 disable_irq(dev->wake_n);
5316
5317 return PTR_ERR(dev->irq_domain);
5318 }
5319
5320 msi_start = irq_create_mapping(dev->irq_domain, 0);
5321 }
5322
5323 return 0;
5324}
5325
/* Tear down the IRQ state set up by msm_pcie_irq_init() for one RC. */
void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	/* release the wakeup source registered in msm_pcie_irq_init() */
	wakeup_source_trash(&dev->ws);

	/* mask the PCIE_WAKE_N line if one was requested */
	if (dev->wake_n)
		disable_irq(dev->wake_n);
}
5335
Tony Truong7772e692017-04-13 17:03:34 -07005336static int msm_pcie_config_device(struct pci_dev *dev, void *pdev)
5337{
5338 struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)pdev;
5339 u8 busnr = dev->bus->number;
5340 u8 slot = PCI_SLOT(dev->devfn);
5341 u8 func = PCI_FUNC(dev->devfn);
5342
5343 PCIE_DBG(pcie_dev, "PCIe: RC%d: configure PCI device %02x:%02x.%01x\n",
5344 pcie_dev->rc_idx, busnr, slot, func);
5345
5346 return 0;
5347}
5348
5349/* Hook to setup PCI device during PCI framework scan */
5350int pcibios_add_device(struct pci_dev *dev)
5351{
5352 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5353
5354 return msm_pcie_config_device(dev, pcie_dev);
5355}
Tony Truong349ee492014-10-01 17:35:56 -07005356
5357static int msm_pcie_probe(struct platform_device *pdev)
5358{
5359 int ret = 0;
5360 int rc_idx = -1;
5361 int i, j;
5362
5363 PCIE_GEN_DBG("%s\n", __func__);
5364
5365 mutex_lock(&pcie_drv.drv_lock);
5366
5367 ret = of_property_read_u32((&pdev->dev)->of_node,
5368 "cell-index", &rc_idx);
5369 if (ret) {
5370 PCIE_GEN_DBG("Did not find RC index.\n");
5371 goto out;
5372 } else {
5373 if (rc_idx >= MAX_RC_NUM) {
5374 pr_err(
5375 "PCIe: Invalid RC Index %d (max supported = %d)\n",
5376 rc_idx, MAX_RC_NUM);
5377 goto out;
5378 }
5379 pcie_drv.rc_num++;
5380 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
5381 rc_idx);
5382 }
5383
5384 msm_pcie_dev[rc_idx].l0s_supported =
5385 of_property_read_bool((&pdev->dev)->of_node,
5386 "qcom,l0s-supported");
5387 PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
5388 msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
5389 msm_pcie_dev[rc_idx].l1_supported =
5390 of_property_read_bool((&pdev->dev)->of_node,
5391 "qcom,l1-supported");
5392 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
5393 msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
5394 msm_pcie_dev[rc_idx].l1ss_supported =
5395 of_property_read_bool((&pdev->dev)->of_node,
5396 "qcom,l1ss-supported");
5397 PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
5398 msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
5399 msm_pcie_dev[rc_idx].common_clk_en =
5400 of_property_read_bool((&pdev->dev)->of_node,
5401 "qcom,common-clk-en");
5402 PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
5403 msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
5404 msm_pcie_dev[rc_idx].clk_power_manage_en =
5405 of_property_read_bool((&pdev->dev)->of_node,
5406 "qcom,clk-power-manage-en");
5407 PCIE_DBG(&msm_pcie_dev[rc_idx],
5408 "Clock power management is %s enabled.\n",
5409 msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
5410 msm_pcie_dev[rc_idx].aux_clk_sync =
5411 of_property_read_bool((&pdev->dev)->of_node,
5412 "qcom,aux-clk-sync");
5413 PCIE_DBG(&msm_pcie_dev[rc_idx],
5414 "AUX clock is %s synchronous to Core clock.\n",
5415 msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
5416
5417 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
5418 of_property_read_bool((&pdev->dev)->of_node,
5419 "qcom,use-19p2mhz-aux-clk");
5420 PCIE_DBG(&msm_pcie_dev[rc_idx],
5421 "AUX clock frequency is %s 19.2MHz.\n",
5422 msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
5423
5424 msm_pcie_dev[rc_idx].smmu_exist =
5425 of_property_read_bool((&pdev->dev)->of_node,
5426 "qcom,smmu-exist");
5427 PCIE_DBG(&msm_pcie_dev[rc_idx],
5428 "SMMU does %s exist.\n",
5429 msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
5430
5431 msm_pcie_dev[rc_idx].smmu_sid_base = 0;
5432 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
5433 &msm_pcie_dev[rc_idx].smmu_sid_base);
5434 if (ret)
5435 PCIE_DBG(&msm_pcie_dev[rc_idx],
5436 "RC%d SMMU sid base not found\n",
5437 msm_pcie_dev[rc_idx].rc_idx);
5438 else
5439 PCIE_DBG(&msm_pcie_dev[rc_idx],
5440 "RC%d: qcom,smmu-sid-base: 0x%x.\n",
5441 msm_pcie_dev[rc_idx].rc_idx,
5442 msm_pcie_dev[rc_idx].smmu_sid_base);
5443
Tony Truong9f2c7722017-02-28 15:02:27 -08005444 msm_pcie_dev[rc_idx].boot_option = 0;
5445 ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,boot-option",
5446 &msm_pcie_dev[rc_idx].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07005447 PCIE_DBG(&msm_pcie_dev[rc_idx],
Tony Truong9f2c7722017-02-28 15:02:27 -08005448 "PCIe: RC%d boot option is 0x%x.\n",
5449 rc_idx, msm_pcie_dev[rc_idx].boot_option);
Tony Truong349ee492014-10-01 17:35:56 -07005450
5451 msm_pcie_dev[rc_idx].phy_ver = 1;
5452 ret = of_property_read_u32((&pdev->dev)->of_node,
5453 "qcom,pcie-phy-ver",
5454 &msm_pcie_dev[rc_idx].phy_ver);
5455 if (ret)
5456 PCIE_DBG(&msm_pcie_dev[rc_idx],
5457 "RC%d: pcie-phy-ver does not exist.\n",
5458 msm_pcie_dev[rc_idx].rc_idx);
5459 else
5460 PCIE_DBG(&msm_pcie_dev[rc_idx],
5461 "RC%d: pcie-phy-ver: %d.\n",
5462 msm_pcie_dev[rc_idx].rc_idx,
5463 msm_pcie_dev[rc_idx].phy_ver);
5464
5465 msm_pcie_dev[rc_idx].n_fts = 0;
5466 ret = of_property_read_u32((&pdev->dev)->of_node,
5467 "qcom,n-fts",
5468 &msm_pcie_dev[rc_idx].n_fts);
5469
5470 if (ret)
5471 PCIE_DBG(&msm_pcie_dev[rc_idx],
5472 "n-fts does not exist. ret=%d\n", ret);
5473 else
5474 PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
5475 msm_pcie_dev[rc_idx].n_fts);
5476
5477 msm_pcie_dev[rc_idx].common_phy =
5478 of_property_read_bool((&pdev->dev)->of_node,
5479 "qcom,common-phy");
5480 PCIE_DBG(&msm_pcie_dev[rc_idx],
5481 "PCIe: RC%d: Common PHY does %s exist.\n",
5482 rc_idx, msm_pcie_dev[rc_idx].common_phy ? "" : "not");
5483
5484 msm_pcie_dev[rc_idx].ext_ref_clk =
5485 of_property_read_bool((&pdev->dev)->of_node,
5486 "qcom,ext-ref-clk");
5487 PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
5488 msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
5489
5490 msm_pcie_dev[rc_idx].ep_latency = 0;
5491 ret = of_property_read_u32((&pdev->dev)->of_node,
5492 "qcom,ep-latency",
5493 &msm_pcie_dev[rc_idx].ep_latency);
5494 if (ret)
5495 PCIE_DBG(&msm_pcie_dev[rc_idx],
5496 "RC%d: ep-latency does not exist.\n",
5497 rc_idx);
5498 else
5499 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
5500 rc_idx, msm_pcie_dev[rc_idx].ep_latency);
5501
5502 msm_pcie_dev[rc_idx].wr_halt_size = 0;
5503 ret = of_property_read_u32(pdev->dev.of_node,
5504 "qcom,wr-halt-size",
5505 &msm_pcie_dev[rc_idx].wr_halt_size);
5506 if (ret)
5507 PCIE_DBG(&msm_pcie_dev[rc_idx],
5508 "RC%d: wr-halt-size not specified in dt. Use default value.\n",
5509 rc_idx);
5510 else
5511 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
5512 rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
5513
5514 msm_pcie_dev[rc_idx].cpl_timeout = 0;
5515 ret = of_property_read_u32((&pdev->dev)->of_node,
5516 "qcom,cpl-timeout",
5517 &msm_pcie_dev[rc_idx].cpl_timeout);
5518 if (ret)
5519 PCIE_DBG(&msm_pcie_dev[rc_idx],
5520 "RC%d: Using default cpl-timeout.\n",
5521 rc_idx);
5522 else
5523 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
5524 rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
5525
5526 msm_pcie_dev[rc_idx].perst_delay_us_min =
5527 PERST_PROPAGATION_DELAY_US_MIN;
5528 ret = of_property_read_u32(pdev->dev.of_node,
5529 "qcom,perst-delay-us-min",
5530 &msm_pcie_dev[rc_idx].perst_delay_us_min);
5531 if (ret)
5532 PCIE_DBG(&msm_pcie_dev[rc_idx],
5533 "RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
5534 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
5535 else
5536 PCIE_DBG(&msm_pcie_dev[rc_idx],
5537 "RC%d: perst-delay-us-min: %dus.\n",
5538 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
5539
5540 msm_pcie_dev[rc_idx].perst_delay_us_max =
5541 PERST_PROPAGATION_DELAY_US_MAX;
5542 ret = of_property_read_u32(pdev->dev.of_node,
5543 "qcom,perst-delay-us-max",
5544 &msm_pcie_dev[rc_idx].perst_delay_us_max);
5545 if (ret)
5546 PCIE_DBG(&msm_pcie_dev[rc_idx],
5547 "RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
5548 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
5549 else
5550 PCIE_DBG(&msm_pcie_dev[rc_idx],
5551 "RC%d: perst-delay-us-max: %dus.\n",
5552 rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
5553
5554 msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
5555 ret = of_property_read_u32(pdev->dev.of_node,
5556 "qcom,tlp-rd-size",
5557 &msm_pcie_dev[rc_idx].tlp_rd_size);
5558 if (ret)
5559 PCIE_DBG(&msm_pcie_dev[rc_idx],
5560 "RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
5561 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
5562 else
5563 PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
5564 rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
5565
5566 msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
5567 msm_pcie_dev[rc_idx].msi_gicm_base = 0;
5568 ret = of_property_read_u32((&pdev->dev)->of_node,
5569 "qcom,msi-gicm-addr",
5570 &msm_pcie_dev[rc_idx].msi_gicm_addr);
5571
5572 if (ret) {
5573 PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
5574 "msi-gicm-addr does not exist.\n");
5575 } else {
5576 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
5577 msm_pcie_dev[rc_idx].msi_gicm_addr);
5578
5579 ret = of_property_read_u32((&pdev->dev)->of_node,
5580 "qcom,msi-gicm-base",
5581 &msm_pcie_dev[rc_idx].msi_gicm_base);
5582
5583 if (ret) {
5584 PCIE_ERR(&msm_pcie_dev[rc_idx],
5585 "PCIe: RC%d: msi-gicm-base does not exist.\n",
5586 rc_idx);
5587 goto decrease_rc_num;
5588 } else {
5589 PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
5590 msm_pcie_dev[rc_idx].msi_gicm_base);
5591 }
5592 }
5593
5594 msm_pcie_dev[rc_idx].scm_dev_id = 0;
5595 ret = of_property_read_u32((&pdev->dev)->of_node,
5596 "qcom,scm-dev-id",
5597 &msm_pcie_dev[rc_idx].scm_dev_id);
5598
5599 msm_pcie_dev[rc_idx].rc_idx = rc_idx;
5600 msm_pcie_dev[rc_idx].pdev = pdev;
5601 msm_pcie_dev[rc_idx].vreg_n = 0;
5602 msm_pcie_dev[rc_idx].gpio_n = 0;
5603 msm_pcie_dev[rc_idx].parf_deemph = 0;
5604 msm_pcie_dev[rc_idx].parf_swing = 0;
5605 msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
5606 msm_pcie_dev[rc_idx].user_suspend = false;
5607 msm_pcie_dev[rc_idx].disable_pc = false;
5608 msm_pcie_dev[rc_idx].saved_state = NULL;
5609 msm_pcie_dev[rc_idx].enumerated = false;
5610 msm_pcie_dev[rc_idx].num_active_ep = 0;
5611 msm_pcie_dev[rc_idx].num_ep = 0;
5612 msm_pcie_dev[rc_idx].pending_ep_reg = false;
5613 msm_pcie_dev[rc_idx].phy_len = 0;
5614 msm_pcie_dev[rc_idx].port_phy_len = 0;
5615 msm_pcie_dev[rc_idx].phy_sequence = NULL;
5616 msm_pcie_dev[rc_idx].port_phy_sequence = NULL;
5617 msm_pcie_dev[rc_idx].event_reg = NULL;
5618 msm_pcie_dev[rc_idx].linkdown_counter = 0;
5619 msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
5620 msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
5621 msm_pcie_dev[rc_idx].rc_corr_counter = 0;
5622 msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
5623 msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
5624 msm_pcie_dev[rc_idx].ep_corr_counter = 0;
5625 msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
5626 msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
5627 msm_pcie_dev[rc_idx].suspending = false;
5628 msm_pcie_dev[rc_idx].wake_counter = 0;
5629 msm_pcie_dev[rc_idx].aer_enable = true;
5630 msm_pcie_dev[rc_idx].power_on = false;
5631 msm_pcie_dev[rc_idx].current_short_bdf = 0;
5632 msm_pcie_dev[rc_idx].use_msi = false;
5633 msm_pcie_dev[rc_idx].use_pinctrl = false;
5634 msm_pcie_dev[rc_idx].linkdown_panic = false;
5635 msm_pcie_dev[rc_idx].bridge_found = false;
5636 memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
5637 sizeof(msm_pcie_vreg_info));
5638 memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
5639 sizeof(msm_pcie_gpio_info));
5640 memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
5641 sizeof(msm_pcie_clk_info[rc_idx]));
5642 memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
5643 sizeof(msm_pcie_pipe_clk_info[rc_idx]));
5644 memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
5645 sizeof(msm_pcie_res_info));
5646 memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
5647 sizeof(msm_pcie_irq_info));
5648 memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
5649 sizeof(msm_pcie_msi_info));
5650 memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
5651 sizeof(msm_pcie_reset_info[rc_idx]));
5652 memcpy(msm_pcie_dev[rc_idx].pipe_reset,
5653 msm_pcie_pipe_reset_info[rc_idx],
5654 sizeof(msm_pcie_pipe_reset_info[rc_idx]));
5655 msm_pcie_dev[rc_idx].shadow_en = true;
5656 for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
5657 msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
5658 for (i = 0; i < MAX_DEVICE_NUM; i++)
5659 for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
5660 msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
5661 for (i = 0; i < MAX_DEVICE_NUM; i++) {
5662 msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
5663 msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
5664 msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
5665 msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
5666 msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
5667 msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = 0;
5668 msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
5669 msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
5670 msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
5671 msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
5672 }
5673
Tony Truongbd9a3412017-02-27 18:30:13 -08005674 dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
Tony Truongbd9a3412017-02-27 18:30:13 -08005675
Tony Truong349ee492014-10-01 17:35:56 -07005676 ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
5677 msm_pcie_dev[rc_idx].pdev);
5678
5679 if (ret)
5680 goto decrease_rc_num;
5681
5682 msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
5683 if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
5684 PCIE_ERR(&msm_pcie_dev[rc_idx],
5685 "PCIe: RC%d failed to get pinctrl\n",
5686 rc_idx);
5687 else
5688 msm_pcie_dev[rc_idx].use_pinctrl = true;
5689
5690 if (msm_pcie_dev[rc_idx].use_pinctrl) {
5691 msm_pcie_dev[rc_idx].pins_default =
5692 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
5693 "default");
5694 if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
5695 PCIE_ERR(&msm_pcie_dev[rc_idx],
5696 "PCIe: RC%d could not get pinctrl default state\n",
5697 rc_idx);
5698 msm_pcie_dev[rc_idx].pins_default = NULL;
5699 }
5700
5701 msm_pcie_dev[rc_idx].pins_sleep =
5702 pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
5703 "sleep");
5704 if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
5705 PCIE_ERR(&msm_pcie_dev[rc_idx],
5706 "PCIe: RC%d could not get pinctrl sleep state\n",
5707 rc_idx);
5708 msm_pcie_dev[rc_idx].pins_sleep = NULL;
5709 }
5710 }
5711
5712 ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
5713 if (ret) {
5714 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
5715 goto decrease_rc_num;
5716 }
5717
5718 ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
5719 if (ret) {
5720 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
5721 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
5722 goto decrease_rc_num;
5723 }
5724
Tony Truong14a5ddf2017-04-20 11:04:03 -07005725 msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
5726
Tony Truong349ee492014-10-01 17:35:56 -07005727 msm_pcie_dev[rc_idx].drv_ready = true;
5728
Tony Truong9f2c7722017-02-28 15:02:27 -08005729 if (msm_pcie_dev[rc_idx].boot_option &
5730 MSM_PCIE_NO_PROBE_ENUMERATION) {
Tony Truong349ee492014-10-01 17:35:56 -07005731 PCIE_DBG(&msm_pcie_dev[rc_idx],
Tony Truong9f2c7722017-02-28 15:02:27 -08005732 "PCIe: RC%d will be enumerated by client or endpoint.\n",
Tony Truong349ee492014-10-01 17:35:56 -07005733 rc_idx);
5734 mutex_unlock(&pcie_drv.drv_lock);
5735 return 0;
5736 }
5737
5738 ret = msm_pcie_enumerate(rc_idx);
5739
5740 if (ret)
5741 PCIE_ERR(&msm_pcie_dev[rc_idx],
5742 "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
5743 rc_idx);
5744 else
5745 PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n",
5746 rc_idx);
5747
5748 PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n",
5749 dev_name(&(pdev->dev)));
5750
5751 mutex_unlock(&pcie_drv.drv_lock);
5752 return 0;
5753
5754decrease_rc_num:
5755 pcie_drv.rc_num--;
5756out:
5757 if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
5758 pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
5759 rc_idx);
5760 else
5761 PCIE_ERR(&msm_pcie_dev[rc_idx],
5762 "PCIe: Driver probe failed for RC%d:%d\n",
5763 rc_idx, ret);
5764
5765 mutex_unlock(&pcie_drv.drv_lock);
5766
5767 return ret;
5768}
5769
5770static int msm_pcie_remove(struct platform_device *pdev)
5771{
5772 int ret = 0;
5773 int rc_idx;
5774
5775 PCIE_GEN_DBG("PCIe:%s.\n", __func__);
5776
5777 mutex_lock(&pcie_drv.drv_lock);
5778
5779 ret = of_property_read_u32((&pdev->dev)->of_node,
5780 "cell-index", &rc_idx);
5781 if (ret) {
5782 pr_err("%s: Did not find RC index.\n", __func__);
5783 goto out;
5784 } else {
5785 pcie_drv.rc_num--;
5786 PCIE_GEN_DBG("%s: RC index is 0x%x.", __func__, rc_idx);
5787 }
5788
5789 msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
5790 msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
5791 msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
5792 msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
5793 msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
5794
5795out:
5796 mutex_unlock(&pcie_drv.drv_lock);
5797
5798 return ret;
5799}
5800
/* DT compatible string this platform driver binds against */
static const struct of_device_id msm_pcie_match[] = {
	{	.compatible = "qcom,pci-msm",
	},
	{}
};
5806
/* Platform driver glue: probe/remove entry points and DT match table */
static struct platform_driver msm_pcie_driver = {
	.probe	= msm_pcie_probe,
	.remove	= msm_pcie_remove,
	.driver	= {
		.name		= "pci-msm",
		.owner		= THIS_MODULE,
		.of_match_table	= msm_pcie_match,
	},
};
5816
/**
 * pcie_init() - module init: prepare per-RC state and register the driver
 *
 * Creates three IPC logging contexts per possible root complex
 * ("short", "long", "dump"), initializes all per-RC locks and flags,
 * clears the global endpoint device table, sets up debugfs, and finally
 * registers the platform driver.
 *
 * Return: 0 on success, errno from platform_driver_register() otherwise.
 */
int __init pcie_init(void)
{
	int ret = 0, i;
	char rc_name[MAX_RC_NAME_LEN];

	pr_alert("pcie:%s.\n", __func__);

	pcie_drv.rc_num = 0;
	mutex_init(&pcie_drv.drv_lock);
	mutex_init(&com_phy_lock);

	for (i = 0; i < MAX_RC_NUM; i++) {
		/* IPC log failures are non-fatal: the driver still works */
		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
		msm_pcie_dev[i].ipc_log =
			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
		if (msm_pcie_dev[i].ipc_log == NULL)
			pr_err("%s: unable to create IPC log context for %s\n",
				__func__, rc_name);
		else
			PCIE_DBG(&msm_pcie_dev[i],
				"PCIe IPC logging is enable for RC%d\n",
				i);
		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
		msm_pcie_dev[i].ipc_log_long =
			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
		if (msm_pcie_dev[i].ipc_log_long == NULL)
			pr_err("%s: unable to create IPC log context for %s\n",
				__func__, rc_name);
		else
			PCIE_DBG(&msm_pcie_dev[i],
				"PCIe IPC logging %s is enable for RC%d\n",
				rc_name, i);
		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
		msm_pcie_dev[i].ipc_log_dump =
			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
		if (msm_pcie_dev[i].ipc_log_dump == NULL)
			pr_err("%s: unable to create IPC log context for %s\n",
				__func__, rc_name);
		else
			PCIE_DBG(&msm_pcie_dev[i],
				"PCIe IPC logging %s is enable for RC%d\n",
				rc_name, i);
		/* per-RC locks guarding config access and error handling */
		spin_lock_init(&msm_pcie_dev[i].cfg_lock);
		msm_pcie_dev[i].cfg_access = true;
		mutex_init(&msm_pcie_dev[i].enumerate_lock);
		mutex_init(&msm_pcie_dev[i].setup_lock);
		mutex_init(&msm_pcie_dev[i].recovery_lock);
		spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
		spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
		spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
		spin_lock_init(&msm_pcie_dev[i].aer_lock);
		msm_pcie_dev[i].drv_ready = false;
	}
	/* reset the flat endpoint table shared by all root complexes */
	for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
		msm_pcie_dev_tbl[i].bdf = 0;
		msm_pcie_dev_tbl[i].dev = NULL;
		msm_pcie_dev_tbl[i].short_bdf = 0;
		msm_pcie_dev_tbl[i].sid = 0;
		msm_pcie_dev_tbl[i].domain = -1;
		msm_pcie_dev_tbl[i].conf_base = 0;
		msm_pcie_dev_tbl[i].phy_address = 0;
		msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
		msm_pcie_dev_tbl[i].event_reg = NULL;
		msm_pcie_dev_tbl[i].registered = true;
	}

	msm_pcie_debugfs_init();

	ret = platform_driver_register(&msm_pcie_driver);

	return ret;
}
5889
/* Module exit: unregister the driver and drop debugfs/sysfs state. */
static void __exit pcie_exit(void)
{
	int i;

	PCIE_GEN_DBG("pcie:%s.\n", __func__);

	platform_driver_unregister(&msm_pcie_driver);

	msm_pcie_debugfs_exit();

	/* sysfs nodes were created per-RC in probe; remove them all */
	for (i = 0; i < MAX_RC_NUM; i++)
		msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
}
5903
5904subsys_initcall_sync(pcie_init);
5905module_exit(pcie_exit);
5906
5907
5908/* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
5909static void msm_pcie_fixup_early(struct pci_dev *dev)
5910{
5911 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
5912
5913 PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
5914 if (dev->hdr_type == 1)
5915 dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
5916}
5917DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
5918 msm_pcie_fixup_early);
5919
/* Suspend the PCIe link */
/*
 * Sequence: mark the RC as suspending (under aer_lock so the AER path
 * sees it), optionally save the EP config space, block further config
 * access, send PME_Turn_Off via ELBI, poll PARF for L23_Ready, switch
 * pinctrl to sleep state, then power down clocks and regulators.
 * Returns 0 on success or the pci_save_state() error.
 */
static int msm_pcie_pm_suspend(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret = 0;
	u32 val = 0;
	int ret_l23;
	unsigned long irqsave_flags;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
	pcie_dev->suspending = true;
	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);

	/* nothing to do if the RC is already powered off */
	if (!pcie_dev->power_on) {
		PCIE_DBG(pcie_dev,
			"PCIe: power of RC%d has been turned off.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	/* save config space only when the link is confirmed up and the
	 * caller did not opt out with MSM_PCIE_CONFIG_NO_CFG_RESTORE
	 */
	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
			pcie_dev->conf)) {
		ret = pci_save_state(dev);
		pcie_dev->saved_state =	pci_store_saved_state(dev);
	}
	if (ret) {
		PCIE_ERR(pcie_dev, "PCIe: fail to save state of RC%d:%d.\n",
			pcie_dev->rc_idx, ret);
		pcie_dev->suspending = false;
		return ret;
	}

	/* block config-space access for the duration of the suspend */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = false;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* ELBI SYS_CTRL bit 4 triggers the PME_Turn_Off message */
	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
				BIT(4));

	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
		pcie_dev->rc_idx);

	/* poll PARF PM status (bit 5) for L23_Ready, up to 100ms */
	ret_l23 = readl_poll_timeout((pcie_dev->parf
		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);

	/* check L23_Ready */
	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
		pcie_dev->rc_idx,
		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
	if (!ret_l23)
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
			pcie_dev->rc_idx);

	/* a missed L23 is logged but does not abort the power-down */
	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_sleep);

	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
5992
/*
 * PCI framework suspend fixup: suspend the link unless it is not up or
 * the user explicitly disabled power collapse (disable_pc).
 */
static void msm_pcie_fixup_suspend(struct pci_dev *dev)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);

	if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
		return;

	/* disable_pc is checked under cfg_lock to race-protect against
	 * the user toggling it concurrently
	 */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	if (pcie_dev->disable_pc) {
		PCIE_DBG(pcie_dev,
			"RC%d: Skip suspend because of user request\n",
			pcie_dev->rc_idx);
		spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
		return;
	}
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	/* recovery_lock serializes suspend against linkdown recovery */
	mutex_lock(&pcie_dev->recovery_lock);

	ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
	if (ret)
		PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
			pcie_dev->rc_idx, ret);

	mutex_unlock(&pcie_dev->recovery_lock);
}
DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
			  msm_pcie_fixup_suspend);
6027
/* Resume the PCIe link */
/*
 * Sequence: restore default pinctrl state, re-allow config access,
 * power the link back up, then (unless the caller opted out) restore the
 * saved PCI config space and re-run the bridge config recovery.
 * Returns 0 on success or the msm_pcie_enable() error.
 */
static int msm_pcie_pm_resume(struct pci_dev *dev,
			void *user, void *data, u32 options)
{
	int ret;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
		pinctrl_select_state(pcie_dev->pinctrl,
					pcie_dev->pins_default);

	/* re-enable config-space access blocked during suspend */
	spin_lock_irqsave(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);
	pcie_dev->cfg_access = true;
	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				pcie_dev->irqsave_flags);

	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
	if (ret) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d fail to enable PCIe link in resume.\n",
			pcie_dev->rc_idx);
		return ret;
	}

	pcie_dev->suspending = false;
	PCIE_DBG(pcie_dev,
		"dev->bus->number = %d dev->bus->primary = %d\n",
		 dev->bus->number, dev->bus->primary);

	/* restore config space saved by msm_pcie_pm_suspend() */
	if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCI framework restore state\n",
			pcie_dev->rc_idx);

		pci_load_and_free_saved_state(dev,
				&pcie_dev->saved_state);
		pci_restore_state(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCI framework restore state\n",
			pcie_dev->rc_idx);
	}

	if (pcie_dev->bridge_found) {
		PCIE_DBG(pcie_dev,
			"RC%d: entry of PCIe recover config\n",
			pcie_dev->rc_idx);

		msm_pcie_recover_config(dev);

		PCIE_DBG(pcie_dev,
			"RC%d: exit of PCIe recover config\n",
			pcie_dev->rc_idx);
	}

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);

	return ret;
}
6090
6091void msm_pcie_fixup_resume(struct pci_dev *dev)
6092{
6093 int ret;
6094 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6095
6096 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6097
6098 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6099 pcie_dev->user_suspend)
6100 return;
6101
6102 mutex_lock(&pcie_dev->recovery_lock);
6103 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6104 if (ret)
6105 PCIE_ERR(pcie_dev,
6106 "PCIe: RC%d got failure in fixup resume:%d.\n",
6107 pcie_dev->rc_idx, ret);
6108
6109 mutex_unlock(&pcie_dev->recovery_lock);
6110}
6111DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6112 msm_pcie_fixup_resume);
6113
6114void msm_pcie_fixup_resume_early(struct pci_dev *dev)
6115{
6116 int ret;
6117 struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6118
6119 PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
6120
6121 if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
6122 pcie_dev->user_suspend)
6123 return;
6124
6125 mutex_lock(&pcie_dev->recovery_lock);
6126 ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
6127 if (ret)
6128 PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
6129 pcie_dev->rc_idx, ret);
6130
6131 mutex_unlock(&pcie_dev->recovery_lock);
6132}
6133DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
6134 msm_pcie_fixup_resume_early);
6135
6136int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
6137 void *data, u32 options)
6138{
6139 int i, ret = 0;
6140 struct pci_dev *dev;
6141 u32 rc_idx = 0;
6142 struct msm_pcie_dev_t *pcie_dev;
6143
6144 PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
6145 pm_opt, busnr, options);
6146
6147
6148 if (!user) {
6149 pr_err("PCIe: endpoint device is NULL\n");
6150 ret = -ENODEV;
6151 goto out;
6152 }
6153
6154 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);
6155
6156 if (pcie_dev) {
6157 rc_idx = pcie_dev->rc_idx;
6158 PCIE_DBG(pcie_dev,
6159 "PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
6160 rc_idx, pm_opt, busnr, options);
6161 } else {
6162 pr_err(
6163 "PCIe: did not find RC for pci endpoint device.\n"
6164 );
6165 ret = -ENODEV;
6166 goto out;
6167 }
6168
6169 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6170 if (!busnr)
6171 break;
6172 if (user == pcie_dev->pcidev_table[i].dev) {
6173 if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
6174 break;
6175
6176 PCIE_ERR(pcie_dev,
6177 "PCIe: RC%d: bus number %d does not match with the expected value %d\n",
6178 pcie_dev->rc_idx, busnr,
6179 pcie_dev->pcidev_table[i].bdf >> 24);
6180 ret = MSM_PCIE_ERROR;
6181 goto out;
6182 }
6183 }
6184
6185 if (i == MAX_DEVICE_NUM) {
6186 PCIE_ERR(pcie_dev,
6187 "PCIe: RC%d: endpoint device was not found in device table",
6188 pcie_dev->rc_idx);
6189 ret = MSM_PCIE_ERROR;
6190 goto out;
6191 }
6192
6193 dev = msm_pcie_dev[rc_idx].dev;
6194
6195 if (!msm_pcie_dev[rc_idx].drv_ready) {
6196 PCIE_ERR(&msm_pcie_dev[rc_idx],
6197 "RC%d has not been successfully probed yet\n",
6198 rc_idx);
6199 return -EPROBE_DEFER;
6200 }
6201
6202 switch (pm_opt) {
6203 case MSM_PCIE_SUSPEND:
6204 PCIE_DBG(&msm_pcie_dev[rc_idx],
6205 "User of RC%d requests to suspend the link\n", rc_idx);
6206 if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
6207 PCIE_DBG(&msm_pcie_dev[rc_idx],
6208 "PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
6209 rc_idx, msm_pcie_dev[rc_idx].link_status);
6210
6211 if (!msm_pcie_dev[rc_idx].power_on) {
6212 PCIE_ERR(&msm_pcie_dev[rc_idx],
6213 "PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
6214 rc_idx, msm_pcie_dev[rc_idx].link_status);
6215 break;
6216 }
6217
6218 if (msm_pcie_dev[rc_idx].pending_ep_reg) {
6219 PCIE_DBG(&msm_pcie_dev[rc_idx],
6220 "PCIe: RC%d: request to suspend the link is rejected\n",
6221 rc_idx);
6222 break;
6223 }
6224
6225 if (pcie_dev->num_active_ep) {
6226 PCIE_DBG(pcie_dev,
6227 "RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
6228 pcie_dev->rc_idx, pcie_dev->num_active_ep);
6229 return ret;
6230 }
6231
6232 msm_pcie_dev[rc_idx].user_suspend = true;
6233
6234 mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
6235
6236 ret = msm_pcie_pm_suspend(dev, user, data, options);
6237 if (ret) {
6238 PCIE_ERR(&msm_pcie_dev[rc_idx],
6239 "PCIe: RC%d: user failed to suspend the link.\n",
6240 rc_idx);
6241 msm_pcie_dev[rc_idx].user_suspend = false;
6242 }
6243
6244 mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
6245 break;
6246 case MSM_PCIE_RESUME:
6247 PCIE_DBG(&msm_pcie_dev[rc_idx],
6248 "User of RC%d requests to resume the link\n", rc_idx);
6249 if (msm_pcie_dev[rc_idx].link_status !=
6250 MSM_PCIE_LINK_DISABLED) {
6251 PCIE_ERR(&msm_pcie_dev[rc_idx],
6252 "PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
6253 rc_idx, msm_pcie_dev[rc_idx].link_status,
6254 msm_pcie_dev[rc_idx].num_active_ep);
6255 break;
6256 }
6257
6258 mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
6259 ret = msm_pcie_pm_resume(dev, user, data, options);
6260 if (ret) {
6261 PCIE_ERR(&msm_pcie_dev[rc_idx],
6262 "PCIe: RC%d: user failed to resume the link.\n",
6263 rc_idx);
6264 } else {
6265 PCIE_DBG(&msm_pcie_dev[rc_idx],
6266 "PCIe: RC%d: user succeeded to resume the link.\n",
6267 rc_idx);
6268
6269 msm_pcie_dev[rc_idx].user_suspend = false;
6270 }
6271
6272 mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
6273
6274 break;
6275 case MSM_PCIE_DISABLE_PC:
6276 PCIE_DBG(&msm_pcie_dev[rc_idx],
6277 "User of RC%d requests to keep the link always alive.\n",
6278 rc_idx);
6279 spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
6280 msm_pcie_dev[rc_idx].irqsave_flags);
6281 if (msm_pcie_dev[rc_idx].suspending) {
6282 PCIE_ERR(&msm_pcie_dev[rc_idx],
6283 "PCIe: RC%d Link has been suspended before request\n",
6284 rc_idx);
6285 ret = MSM_PCIE_ERROR;
6286 } else {
6287 msm_pcie_dev[rc_idx].disable_pc = true;
6288 }
6289 spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
6290 msm_pcie_dev[rc_idx].irqsave_flags);
6291 break;
6292 case MSM_PCIE_ENABLE_PC:
6293 PCIE_DBG(&msm_pcie_dev[rc_idx],
6294 "User of RC%d cancels the request of alive link.\n",
6295 rc_idx);
6296 spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
6297 msm_pcie_dev[rc_idx].irqsave_flags);
6298 msm_pcie_dev[rc_idx].disable_pc = false;
6299 spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
6300 msm_pcie_dev[rc_idx].irqsave_flags);
6301 break;
6302 default:
6303 PCIE_ERR(&msm_pcie_dev[rc_idx],
6304 "PCIe: RC%d: unsupported pm operation:%d.\n",
6305 rc_idx, pm_opt);
6306 ret = -ENODEV;
6307 goto out;
6308 }
6309
6310out:
6311 return ret;
6312}
6313EXPORT_SYMBOL(msm_pcie_pm_control);
6314
6315int msm_pcie_register_event(struct msm_pcie_register_event *reg)
6316{
6317 int i, ret = 0;
6318 struct msm_pcie_dev_t *pcie_dev;
6319
6320 if (!reg) {
6321 pr_err("PCIe: Event registration is NULL\n");
6322 return -ENODEV;
6323 }
6324
6325 if (!reg->user) {
6326 pr_err("PCIe: User of event registration is NULL\n");
6327 return -ENODEV;
6328 }
6329
6330 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6331
6332 if (!pcie_dev) {
6333 PCIE_ERR(pcie_dev, "%s",
6334 "PCIe: did not find RC for pci endpoint device.\n");
6335 return -ENODEV;
6336 }
6337
6338 if (pcie_dev->num_ep > 1) {
6339 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6340 if (reg->user ==
6341 pcie_dev->pcidev_table[i].dev) {
6342 pcie_dev->event_reg =
6343 pcie_dev->pcidev_table[i].event_reg;
6344
6345 if (!pcie_dev->event_reg) {
6346 pcie_dev->pcidev_table[i].registered =
6347 true;
6348
6349 pcie_dev->num_active_ep++;
6350 PCIE_DBG(pcie_dev,
6351 "PCIe: RC%d: number of active EP(s): %d.\n",
6352 pcie_dev->rc_idx,
6353 pcie_dev->num_active_ep);
6354 }
6355
6356 pcie_dev->event_reg = reg;
6357 pcie_dev->pcidev_table[i].event_reg = reg;
6358 PCIE_DBG(pcie_dev,
6359 "Event 0x%x is registered for RC %d\n",
6360 reg->events,
6361 pcie_dev->rc_idx);
6362
6363 break;
6364 }
6365 }
6366
6367 if (pcie_dev->pending_ep_reg) {
6368 for (i = 0; i < MAX_DEVICE_NUM; i++)
6369 if (!pcie_dev->pcidev_table[i].registered)
6370 break;
6371
6372 if (i == MAX_DEVICE_NUM)
6373 pcie_dev->pending_ep_reg = false;
6374 }
6375 } else {
6376 pcie_dev->event_reg = reg;
6377 PCIE_DBG(pcie_dev,
6378 "Event 0x%x is registered for RC %d\n", reg->events,
6379 pcie_dev->rc_idx);
6380 }
6381
6382 return ret;
6383}
6384EXPORT_SYMBOL(msm_pcie_register_event);
6385
6386int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
6387{
6388 int i, ret = 0;
6389 struct msm_pcie_dev_t *pcie_dev;
6390
6391 if (!reg) {
6392 pr_err("PCIe: Event deregistration is NULL\n");
6393 return -ENODEV;
6394 }
6395
6396 if (!reg->user) {
6397 pr_err("PCIe: User of event deregistration is NULL\n");
6398 return -ENODEV;
6399 }
6400
6401 pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
6402
6403 if (!pcie_dev) {
6404 PCIE_ERR(pcie_dev, "%s",
6405 "PCIe: did not find RC for pci endpoint device.\n");
6406 return -ENODEV;
6407 }
6408
6409 if (pcie_dev->num_ep > 1) {
6410 for (i = 0; i < MAX_DEVICE_NUM; i++) {
6411 if (reg->user == pcie_dev->pcidev_table[i].dev) {
6412 if (pcie_dev->pcidev_table[i].event_reg) {
6413 pcie_dev->num_active_ep--;
6414 PCIE_DBG(pcie_dev,
6415 "PCIe: RC%d: number of active EP(s) left: %d.\n",
6416 pcie_dev->rc_idx,
6417 pcie_dev->num_active_ep);
6418 }
6419
6420 pcie_dev->event_reg = NULL;
6421 pcie_dev->pcidev_table[i].event_reg = NULL;
6422 PCIE_DBG(pcie_dev,
6423 "Event is deregistered for RC %d\n",
6424 pcie_dev->rc_idx);
6425
6426 break;
6427 }
6428 }
6429 } else {
6430 pcie_dev->event_reg = NULL;
6431 PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
6432 pcie_dev->rc_idx);
6433 }
6434
6435 return ret;
6436}
6437EXPORT_SYMBOL(msm_pcie_deregister_event);
6438
6439int msm_pcie_recover_config(struct pci_dev *dev)
6440{
6441 int ret = 0;
6442 struct msm_pcie_dev_t *pcie_dev;
6443
6444 if (dev) {
6445 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6446 PCIE_DBG(pcie_dev,
6447 "Recovery for the link of RC%d\n", pcie_dev->rc_idx);
6448 } else {
6449 pr_err("PCIe: the input pci dev is NULL.\n");
6450 return -ENODEV;
6451 }
6452
6453 if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
6454 PCIE_DBG(pcie_dev,
6455 "Recover config space of RC%d and its EP\n",
6456 pcie_dev->rc_idx);
6457 pcie_dev->shadow_en = false;
6458 PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
6459 msm_pcie_cfg_recover(pcie_dev, true);
6460 PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
6461 msm_pcie_cfg_recover(pcie_dev, false);
6462 PCIE_DBG(pcie_dev,
6463 "Refreshing the saved config space in PCI framework for RC%d and its EP\n",
6464 pcie_dev->rc_idx);
6465 pci_save_state(pcie_dev->dev);
6466 pci_save_state(dev);
6467 pcie_dev->shadow_en = true;
6468 PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
6469 pcie_dev->rc_idx);
6470 } else {
6471 PCIE_ERR(pcie_dev,
6472 "PCIe: the link of RC%d is not up yet; can't recover config space.\n",
6473 pcie_dev->rc_idx);
6474 ret = -ENODEV;
6475 }
6476
6477 return ret;
6478}
6479EXPORT_SYMBOL(msm_pcie_recover_config);
6480
6481int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
6482{
6483 int ret = 0;
6484 struct msm_pcie_dev_t *pcie_dev;
6485
6486 if (dev) {
6487 pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
6488 PCIE_DBG(pcie_dev,
6489 "User requests to %s shadow\n",
6490 enable ? "enable" : "disable");
6491 } else {
6492 pr_err("PCIe: the input pci dev is NULL.\n");
6493 return -ENODEV;
6494 }
6495
6496 PCIE_DBG(pcie_dev,
6497 "The shadowing of RC%d is %s enabled currently.\n",
6498 pcie_dev->rc_idx, pcie_dev->shadow_en ? "" : "not");
6499
6500 pcie_dev->shadow_en = enable;
6501
6502 PCIE_DBG(pcie_dev,
6503 "Shadowing of RC%d is turned %s upon user's request.\n",
6504 pcie_dev->rc_idx, enable ? "on" : "off");
6505
6506 return ret;
6507}
6508EXPORT_SYMBOL(msm_pcie_shadow_control);