blob: 1ecff2e07771ff801e965f1a5fdeb4c56a4372c8 [file] [log] [blame]
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
18
19#include <linux/clk.h>
20#include <linux/delay.h>
21#include <linux/gpio.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/iopoll.h>
25#include <linux/kernel.h>
Paul Gortmakerf9a66602016-08-24 16:57:48 -040026#include <linux/init.h>
Stanimir Varbanov82a82382015-12-18 14:38:57 +020027#include <linux/of_device.h>
28#include <linux/of_gpio.h>
29#include <linux/pci.h>
30#include <linux/platform_device.h>
31#include <linux/phy/phy.h>
32#include <linux/regulator/consumer.h>
33#include <linux/reset.h>
34#include <linux/slab.h>
35#include <linux/types.h>
36
37#include "pcie-designware.h"
38
/* PARF (PCIe wrapper/"access region function") register offsets */
#define PCIE20_PARF_SYS_CTRL			0x00
#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C

/* ELBI (external local bus interface) register offsets */
#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

/* offset of the PCI Express capability in the RC's config space */
#define PCIE20_CAP				0x70

/* settle time (us) after toggling the endpoint PERST# GPIO */
#define PERST_DELAY_US				1000
57
/*
 * Clocks, resets and regulators used by the v0 core
 * (qcom,pcie-ipq8064 / qcom,pcie-apq8064 — see qcom_pcie_match[]).
 */
struct qcom_pcie_resources_v0 {
	struct clk *iface_clk;		/* AHB/register interface clock */
	struct clk *core_clk;		/* core clock */
	struct clk *phy_clk;		/* PHY clock */
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;		/* core supply */
	struct regulator *vdda_phy;	/* PHY supply */
	struct regulator *vdda_refclk;	/* reference-clock supply */
};
71
/*
 * Clocks, reset and regulator used by the v1 core
 * (qcom,pcie-apq8084 — see qcom_pcie_match[]).
 */
struct qcom_pcie_resources_v1 {
	struct clk *iface;		/* register interface clock */
	struct clk *aux;		/* auxiliary clock */
	struct clk *master_bus;		/* AXI master bus clock */
	struct clk *slave_bus;		/* AXI slave bus clock */
	struct reset_control *core;	/* single "core" reset line */
	struct regulator *vdda;		/* core supply */
};
80
/*
 * Clocks used by the v2 core (qcom,pcie-msm8996 — see qcom_pcie_match[]).
 * pipe_clk is sourced from the PHY and is enabled in post_init, after the
 * PHY has been powered on.
 */
struct qcom_pcie_resources_v2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
};
88
/* only one variant's resource set is in use on any probed device */
union qcom_pcie_resources {
	struct qcom_pcie_resources_v0 v0;
	struct qcom_pcie_resources_v1 v1;
	struct qcom_pcie_resources_v2 v2;
};
94
struct qcom_pcie;

/*
 * Per-variant hooks, selected via the OF match data.
 * get_resources/init/deinit manage the variant's clocks, resets and
 * supplies; post_init and ltssm_enable may be NULL — their callers check
 * before invoking them.
 */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};
104
105struct qcom_pcie {
Bjorn Helgaasee053692016-10-06 13:39:37 -0500106 struct pcie_port pp; /* pp.dbi_base is DT dbi */
107 void __iomem *parf; /* DT parf */
108 void __iomem *elbi; /* DT elbi */
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200109 union qcom_pcie_resources res;
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200110 struct phy *phy;
111 struct gpio_desc *reset;
112 struct qcom_pcie_ops *ops;
113};
114
115#define to_qcom_pcie(x) container_of(x, struct qcom_pcie, pp)
116
/* drive the endpoint PERST# GPIO to its asserted state and let it settle */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
122
/* release the endpoint PERST# GPIO and let the endpoint come out of reset */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
128
129static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
130{
131 struct pcie_port *pp = arg;
132
133 return dw_handle_msi_irq(pp);
134}
135
Srinivas Kandagatlad0491fc2016-11-22 10:43:29 +0000136static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200137{
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200138 u32 val;
139
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200140 /* enable link training */
141 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
142 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
143 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
Srinivas Kandagatlad0491fc2016-11-22 10:43:29 +0000144}
145
146static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
147{
148 u32 val;
149
150 /* enable link training */
151 val = readl(pcie->parf + PCIE20_PARF_LTSSM);
152 val |= BIT(8);
153 writel(val, pcie->parf + PCIE20_PARF_LTSSM);
154}
155
156static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
157{
158
159 if (dw_pcie_link_up(&pcie->pp))
160 return 0;
161
162 /* Enable Link Training state machine */
163 if (pcie->ops->ltssm_enable)
164 pcie->ops->ltssm_enable(pcie);
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200165
Joao Pinto886bc5c2016-03-10 14:44:35 -0600166 return dw_pcie_wait_for_link(&pcie->pp);
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200167}
168
/*
 * Look up the v0 (IPQ8064/APQ8064) regulators, clocks and resets.
 *
 * Everything is devm-managed, so nothing acquired here needs explicit
 * release on failure.  Returns 0 or the negative errno of the first
 * failing lookup.
 */
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct device *dev = pcie->pp.dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
	if (IS_ERR(res->vdda_phy))
		return PTR_ERR(res->vdda_phy);

	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
	if (IS_ERR(res->vdda_refclk))
		return PTR_ERR(res->vdda_refclk);

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}
217
/*
 * Look up the v1 (APQ8084) regulator, clocks and reset.
 *
 * All lookups are devm-managed; returns 0 or the negative errno of the
 * first failing lookup.
 */
static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct device *dev = pcie->pp.dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}
246
247static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
248{
249 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
250
251 reset_control_assert(res->pci_reset);
252 reset_control_assert(res->axi_reset);
253 reset_control_assert(res->ahb_reset);
254 reset_control_assert(res->por_reset);
255 reset_control_assert(res->pci_reset);
256 clk_disable_unprepare(res->iface_clk);
257 clk_disable_unprepare(res->core_clk);
258 clk_disable_unprepare(res->phy_clk);
259 regulator_disable(res->vdda);
260 regulator_disable(res->vdda_phy);
261 regulator_disable(res->vdda_refclk);
262}
263
264static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
265{
266 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
Bjorn Helgaase6a087e2016-10-06 13:39:37 -0500267 struct device *dev = pcie->pp.dev;
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200268 u32 val;
269 int ret;
270
271 ret = regulator_enable(res->vdda);
272 if (ret) {
273 dev_err(dev, "cannot enable vdda regulator\n");
274 return ret;
275 }
276
277 ret = regulator_enable(res->vdda_refclk);
278 if (ret) {
279 dev_err(dev, "cannot enable vdda_refclk regulator\n");
280 goto err_refclk;
281 }
282
283 ret = regulator_enable(res->vdda_phy);
284 if (ret) {
285 dev_err(dev, "cannot enable vdda_phy regulator\n");
286 goto err_vdda_phy;
287 }
288
289 ret = reset_control_assert(res->ahb_reset);
290 if (ret) {
291 dev_err(dev, "cannot assert ahb reset\n");
292 goto err_assert_ahb;
293 }
294
295 ret = clk_prepare_enable(res->iface_clk);
296 if (ret) {
297 dev_err(dev, "cannot prepare/enable iface clock\n");
298 goto err_assert_ahb;
299 }
300
301 ret = clk_prepare_enable(res->phy_clk);
302 if (ret) {
303 dev_err(dev, "cannot prepare/enable phy clock\n");
304 goto err_clk_phy;
305 }
306
307 ret = clk_prepare_enable(res->core_clk);
308 if (ret) {
309 dev_err(dev, "cannot prepare/enable core clock\n");
310 goto err_clk_core;
311 }
312
313 ret = reset_control_deassert(res->ahb_reset);
314 if (ret) {
315 dev_err(dev, "cannot deassert ahb reset\n");
316 goto err_deassert_ahb;
317 }
318
319 /* enable PCIe clocks and resets */
320 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
321 val &= ~BIT(0);
322 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
323
324 /* enable external reference clock */
325 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
326 val |= BIT(16);
327 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
328
329 ret = reset_control_deassert(res->phy_reset);
330 if (ret) {
331 dev_err(dev, "cannot deassert phy reset\n");
332 return ret;
333 }
334
335 ret = reset_control_deassert(res->pci_reset);
336 if (ret) {
337 dev_err(dev, "cannot deassert pci reset\n");
338 return ret;
339 }
340
341 ret = reset_control_deassert(res->por_reset);
342 if (ret) {
343 dev_err(dev, "cannot deassert por reset\n");
344 return ret;
345 }
346
347 ret = reset_control_deassert(res->axi_reset);
348 if (ret) {
349 dev_err(dev, "cannot deassert axi reset\n");
350 return ret;
351 }
352
353 /* wait for clock acquisition */
354 usleep_range(1000, 1500);
355
356 return 0;
357
358err_deassert_ahb:
359 clk_disable_unprepare(res->core_clk);
360err_clk_core:
361 clk_disable_unprepare(res->phy_clk);
362err_clk_phy:
363 clk_disable_unprepare(res->iface_clk);
364err_assert_ahb:
365 regulator_disable(res->vdda_phy);
366err_vdda_phy:
367 regulator_disable(res->vdda_refclk);
368err_refclk:
369 regulator_disable(res->vdda);
370
371 return ret;
372}
373
/*
 * Power down the v1 core: assert the core reset, stop the clocks and
 * cut the supply (reverse of qcom_pcie_init_v1()).
 */
static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
385
/*
 * Power up the v1 (APQ8084) core: release the core reset, enable the
 * clocks and supply, then program the PARF block.  On failure, unwinds
 * everything acquired so far via the goto ladder.
 */
static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct device *dev = pcie->pp.dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/*
		 * Set BIT(31) of AXI_MSTR_WR_ADDR_HALT — presumably so AXI
		 * master writes (MSI) are not halted; TODO confirm against
		 * the PARF register documentation.
		 */
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
452
/*
 * Look up the v2 (MSM8996) clocks.  All lookups are devm-managed;
 * returns 0 or the negative errno of the first failing lookup.
 */
static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct device *dev = pcie->pp.dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}
477
478static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
479{
480 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
481 struct device *dev = pcie->pp.dev;
482 u32 val;
483 int ret;
484
485 ret = clk_prepare_enable(res->aux_clk);
486 if (ret) {
487 dev_err(dev, "cannot prepare/enable aux clock\n");
488 return ret;
489 }
490
491 ret = clk_prepare_enable(res->cfg_clk);
492 if (ret) {
493 dev_err(dev, "cannot prepare/enable cfg clock\n");
494 goto err_cfg_clk;
495 }
496
497 ret = clk_prepare_enable(res->master_clk);
498 if (ret) {
499 dev_err(dev, "cannot prepare/enable master clock\n");
500 goto err_master_clk;
501 }
502
503 ret = clk_prepare_enable(res->slave_clk);
504 if (ret) {
505 dev_err(dev, "cannot prepare/enable slave clock\n");
506 goto err_slave_clk;
507 }
508
509 /* enable PCIe clocks and resets */
510 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
511 val &= ~BIT(0);
512 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
513
514 /* change DBI base address */
515 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
516
517 /* MAC PHY_POWERDOWN MUX DISABLE */
518 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
519 val &= ~BIT(29);
520 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
521
522 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
523 val |= BIT(4);
524 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
525
526 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
527 val |= BIT(31);
528 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
529
530 return 0;
531
532err_slave_clk:
533 clk_disable_unprepare(res->master_clk);
534err_master_clk:
535 clk_disable_unprepare(res->cfg_clk);
536err_cfg_clk:
537 clk_disable_unprepare(res->aux_clk);
538
539 return ret;
540}
541
542static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
543{
544 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
545 struct device *dev = pcie->pp.dev;
546 int ret;
547
548 ret = clk_prepare_enable(res->pipe_clk);
549 if (ret) {
550 dev_err(dev, "cannot prepare/enable pipe clock\n");
551 return ret;
552 }
553
554 return 0;
555}
556
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200557static int qcom_pcie_link_up(struct pcie_port *pp)
558{
559 struct qcom_pcie *pcie = to_qcom_pcie(pp);
Bjorn Helgaas0edd5782016-10-06 13:39:37 -0500560 u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200561
562 return !!(val & PCI_EXP_LNKSTA_DLLLA);
563}
564
/* power down the v2 core: stop all clocks (reverse of init/post_init) */
static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;

	clk_disable_unprepare(res->pipe_clk);
	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);
}
575
/*
 * DesignWare host_init callback: bring the controller up and train the
 * link.  Sequence: hold the endpoint in reset, run the variant init,
 * power the PHY, run the optional variant post-init, set up the RC core
 * (and MSI), release the endpoint reset and wait for link-up.
 *
 * NOTE(review): a failed ops->init() jumps to err_deinit and calls
 * ops->deinit() even though init() already unwound itself on its own
 * error path — verify deinit() tolerates being called on unbalanced
 * resources.
 */
static void qcom_pcie_host_init(struct pcie_port *pp)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pp);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		goto err_deinit;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init)
		pcie->ops->post_init(pcie);

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return;
err:
	qcom_ep_reset_assert(pcie);
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);
}
612
613static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
614 u32 *val)
615{
616 /* the device class is not reported correctly from the register */
617 if (where == PCI_CLASS_REVISION && size == 4) {
618 *val = readl(pp->dbi_base + PCI_CLASS_REVISION);
619 *val &= 0xff; /* keep revision id */
620 *val |= PCI_CLASS_BRIDGE_PCI << 16;
621 return PCIBIOS_SUCCESSFUL;
622 }
623
624 return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
625}
626
/* callbacks into this driver from the DesignWare host core */
static struct pcie_host_ops qcom_pcie_dw_ops = {
	.link_up = qcom_pcie_link_up,
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};
632
/* qcom,pcie-ipq8064 / qcom,pcie-apq8064 */
static const struct qcom_pcie_ops ops_v0 = {
	.get_resources = qcom_pcie_get_resources_v0,
	.init = qcom_pcie_init_v0,
	.deinit = qcom_pcie_deinit_v0,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

/* qcom,pcie-apq8084 */
static const struct qcom_pcie_ops ops_v1 = {
	.get_resources = qcom_pcie_get_resources_v1,
	.init = qcom_pcie_init_v1,
	.deinit = qcom_pcie_deinit_v1,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

/* qcom,pcie-msm8996; the only variant with a post_init step (pipe clock) */
static const struct qcom_pcie_ops ops_v2 = {
	.get_resources = qcom_pcie_get_resources_v2,
	.init = qcom_pcie_init_v2,
	.post_init = qcom_pcie_post_init_v2,
	.deinit = qcom_pcie_deinit_v2,
	.ltssm_enable = qcom_pcie_v2_ltssm_enable,
};
654
655static int qcom_pcie_probe(struct platform_device *pdev)
656{
657 struct device *dev = &pdev->dev;
658 struct resource *res;
659 struct qcom_pcie *pcie;
660 struct pcie_port *pp;
661 int ret;
662
663 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
664 if (!pcie)
665 return -ENOMEM;
666
Bjorn Helgaas0edd5782016-10-06 13:39:37 -0500667 pp = &pcie->pp;
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200668 pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200669
670 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
671 if (IS_ERR(pcie->reset))
672 return PTR_ERR(pcie->reset);
673
674 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
675 pcie->parf = devm_ioremap_resource(dev, res);
676 if (IS_ERR(pcie->parf))
677 return PTR_ERR(pcie->parf);
678
679 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
Bjorn Helgaas0edd5782016-10-06 13:39:37 -0500680 pp->dbi_base = devm_ioremap_resource(dev, res);
681 if (IS_ERR(pp->dbi_base))
682 return PTR_ERR(pp->dbi_base);
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200683
684 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
685 pcie->elbi = devm_ioremap_resource(dev, res);
686 if (IS_ERR(pcie->elbi))
687 return PTR_ERR(pcie->elbi);
688
689 pcie->phy = devm_phy_optional_get(dev, "pciephy");
690 if (IS_ERR(pcie->phy))
691 return PTR_ERR(pcie->phy);
692
Srinivas Kandagatlaad110442016-10-25 14:42:51 +0100693 pp->dev = dev;
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200694 ret = pcie->ops->get_resources(pcie);
695 if (ret)
696 return ret;
697
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200698 pp->root_bus_nr = -1;
699 pp->ops = &qcom_pcie_dw_ops;
700
701 if (IS_ENABLED(CONFIG_PCI_MSI)) {
702 pp->msi_irq = platform_get_irq_byname(pdev, "msi");
703 if (pp->msi_irq < 0)
704 return pp->msi_irq;
705
706 ret = devm_request_irq(dev, pp->msi_irq,
707 qcom_pcie_msi_irq_handler,
708 IRQF_SHARED, "qcom-pcie-msi", pp);
709 if (ret) {
710 dev_err(dev, "cannot request msi irq\n");
711 return ret;
712 }
713 }
714
715 ret = phy_init(pcie->phy);
716 if (ret)
717 return ret;
718
719 ret = dw_pcie_host_init(pp);
720 if (ret) {
721 dev_err(dev, "cannot initialize host\n");
722 return ret;
723 }
724
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200725 return 0;
726}
727
/* maps each supported SoC compatible string to its variant ops */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
	{ }
};
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200735
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		/* built-in only (no remove callback): forbid manual unbind */
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);