/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_CAP				0x70

#define PERST_DELAY_US				1000

struct qcom_pcie_resources_v0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;
	struct regulator *vdda_phy;
	struct regulator *vdda_refclk;
};

struct qcom_pcie_resources_v1 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

struct qcom_pcie_resources_v2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_v0 v0;
	struct qcom_pcie_resources_v1 v1;
	struct qcom_pcie_resources_v2 v2;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
{
	struct pcie_port *pp = arg;

	return dw_handle_msi_irq(pp);
}

static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}

static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
	if (IS_ERR(res->vdda_phy))
		return PTR_ERR(res->vdda_phy);

	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
	if (IS_ERR(res->vdda_refclk))
		return PTR_ERR(res->vdda_refclk);

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;

	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->phy_reset);
	clk_disable_unprepare(res->iface_clk);
	clk_disable_unprepare(res->core_clk);
	clk_disable_unprepare(res->phy_clk);
	regulator_disable(res->vdda);
	regulator_disable(res->vdda_phy);
	regulator_disable(res->vdda_refclk);
}

static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		return ret;
	}

	ret = regulator_enable(res->vdda_refclk);
	if (ret) {
		dev_err(dev, "cannot enable vdda_refclk regulator\n");
		goto err_refclk;
	}

	ret = regulator_enable(res->vdda_phy);
	if (ret) {
		dev_err(dev, "cannot enable vdda_phy regulator\n");
		goto err_vdda_phy;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->iface_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->phy_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable phy clock\n");
		goto err_clk_phy;
	}

	ret = clk_prepare_enable(res->core_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable core clock\n");
		goto err_clk_core;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val |= BIT(16);
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		return ret;
	}

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	return 0;

err_deassert_ahb:
	clk_disable_unprepare(res->core_clk);
err_clk_core:
	clk_disable_unprepare(res->phy_clk);
err_clk_phy:
	clk_disable_unprepare(res->iface_clk);
err_assert_ahb:
	regulator_disable(res->vdda_phy);
err_vdda_phy:
	regulator_disable(res->vdda_refclk);
err_refclk:
	regulator_disable(res->vdda);

	return ret;
}

static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		return ret;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

	return ret;
}

static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}
564
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530565static int qcom_pcie_link_up(struct dw_pcie *pci)
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200566{
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530567 u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200568
569 return !!(val & PCI_EXP_LNKSTA_DLLLA);
570}
571
Srinivas Kandagatlad0491fc2016-11-22 10:43:29 +0000572static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
573{
574 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
575
576 clk_disable_unprepare(res->pipe_clk);
577 clk_disable_unprepare(res->slave_clk);
578 clk_disable_unprepare(res->master_clk);
579 clk_disable_unprepare(res->cfg_clk);
580 clk_disable_unprepare(res->aux_clk);
581}

static void qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		goto err_deinit;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init)
		pcie->ops->post_init(pcie);

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return;
err:
	qcom_ep_reset_assert(pcie);
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);
}

static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				 u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* the device class is not reported correctly from the register */
	if (where == PCI_CLASS_REVISION && size == 4) {
		*val = readl(pci->dbi_base + PCI_CLASS_REVISION);
		*val &= 0xff;	/* keep revision id */
		*val |= PCI_CLASS_BRIDGE_PCI << 16;
		return PCIBIOS_SUCCESSFUL;
	}

	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};

static const struct qcom_pcie_ops ops_v0 = {
	.get_resources = qcom_pcie_get_resources_v0,
	.init = qcom_pcie_init_v0,
	.deinit = qcom_pcie_deinit_v0,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

static const struct qcom_pcie_ops ops_v1 = {
	.get_resources = qcom_pcie_get_resources_v1,
	.init = qcom_pcie_init_v1,
	.deinit = qcom_pcie_deinit_v1,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

static const struct qcom_pcie_ops ops_v2 = {
	.get_resources = qcom_pcie_get_resources_v2,
	.init = qcom_pcie_init_v2,
	.post_init = qcom_pcie_post_init_v2,
	.deinit = qcom_pcie_deinit_v2,
	.ltssm_enable = qcom_pcie_v2_ltssm_enable,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};
667
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200668static int qcom_pcie_probe(struct platform_device *pdev)
669{
670 struct device *dev = &pdev->dev;
671 struct resource *res;
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200672 struct pcie_port *pp;
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530673 struct dw_pcie *pci;
674 struct qcom_pcie *pcie;
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200675 int ret;
676
677 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
678 if (!pcie)
679 return -ENOMEM;
680
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530681 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
682 if (!pci)
683 return -ENOMEM;
684
685 pci->dev = dev;
686 pci->ops = &dw_pcie_ops;
687 pp = &pci->pp;
688
Guenter Roeckc0464062017-02-25 02:08:12 -0800689 pcie->pci = pci;
690
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200691 pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200692
693 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
694 if (IS_ERR(pcie->reset))
695 return PTR_ERR(pcie->reset);
696
697 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
698 pcie->parf = devm_ioremap_resource(dev, res);
699 if (IS_ERR(pcie->parf))
700 return PTR_ERR(pcie->parf);
701
702 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
Lorenzo Pieralisi10c736f2017-04-19 17:49:01 +0100703 pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
Kishon Vijay Abraham I442ec4c2017-02-15 18:48:14 +0530704 if (IS_ERR(pci->dbi_base))
705 return PTR_ERR(pci->dbi_base);
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200706
707 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
708 pcie->elbi = devm_ioremap_resource(dev, res);
709 if (IS_ERR(pcie->elbi))
710 return PTR_ERR(pcie->elbi);
711
712 pcie->phy = devm_phy_optional_get(dev, "pciephy");
713 if (IS_ERR(pcie->phy))
714 return PTR_ERR(pcie->phy);
715
716 ret = pcie->ops->get_resources(pcie);
717 if (ret)
718 return ret;
719
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200720 pp->root_bus_nr = -1;
721 pp->ops = &qcom_pcie_dw_ops;
722
723 if (IS_ENABLED(CONFIG_PCI_MSI)) {
724 pp->msi_irq = platform_get_irq_byname(pdev, "msi");
725 if (pp->msi_irq < 0)
726 return pp->msi_irq;
727
728 ret = devm_request_irq(dev, pp->msi_irq,
729 qcom_pcie_msi_irq_handler,
Jisheng Zhang3eefa792017-04-20 18:27:18 +0800730 IRQF_SHARED | IRQF_NO_THREAD,
731 "qcom-pcie-msi", pp);
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200732 if (ret) {
733 dev_err(dev, "cannot request msi irq\n");
734 return ret;
735 }
736 }
737
738 ret = phy_init(pcie->phy);
739 if (ret)
740 return ret;
741
Kishon Vijay Abraham I9bcf0a62017-02-15 18:48:11 +0530742 platform_set_drvdata(pdev, pcie);
743
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200744 ret = dw_pcie_host_init(pp);
745 if (ret) {
746 dev_err(dev, "cannot initialize host\n");
747 return ret;
748 }
749
Stanimir Varbanov82a82382015-12-18 14:38:57 +0200750 return 0;
751}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
	{ }
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);