/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL		0x710
#define PORT_LINK_MODE_MASK		(0x3f << 16)
#define PORT_LINK_MODE_1_LANES		(0x1 << 16)
#define PORT_LINK_MODE_2_LANES		(0x3 << 16)
#define PORT_LINK_MODE_4_LANES		(0x7 << 16)

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK	(0x1ff << 8)
#define PORT_LOGIC_LINK_WIDTH_1_LANES	(0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES	(0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES	(0x4 << 8)

#define PCIE_MSI_ADDR_LO		0x820
#define PCIE_MSI_ADDR_HI		0x824
#define PCIE_MSI_INTR0_ENABLE		0x828
#define PCIE_MSI_INTR0_MASK		0x82C
#define PCIE_MSI_INTR0_STATUS		0x830

#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

static struct hw_pci dw_pci;

static unsigned long global_io_offset;

static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
	BUG_ON(!sys->private_data);

	return sys->private_data;
}

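/*
 * Generic configuration space accessors: 'addr' is the 32-bit aligned base
 * of the register.  dw_pcie_cfg_read() extracts the requested byte/halfword
 * from a 32-bit read, while dw_pcie_cfg_write() issues a byte/halfword/word
 * write at the offset selected by 'where'.
 */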
int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val)
{
	*val = readl(addr);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;
	else if (size != 4)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val)
{
	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr + (where & 2));
	else if (size == 1)
		writeb(val, addr + (where & 3));
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
{
	if (pp->ops->readl_rc)
		pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
	else
		*val = readl(pp->dbi_base + reg);
}

static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
{
	if (pp->ops->writel_rc)
		pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
	else
		writel(val, pp->dbi_base + reg);
}

static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	int ret;

	if (pp->ops->rd_own_conf)
		ret = pp->ops->rd_own_conf(pp, where, size, val);
	else
		ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where,
				size, val);

	return ret;
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	int ret;

	if (pp->ops->wr_own_conf)
		ret = pp->ops->wr_own_conf(pp, where, size, val);
	else
		ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where,
				size, val);

	return ret;
}

static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	unsigned long val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				(u32 *)&val);
		if (val) {
			ret = IRQ_HANDLED;
			pos = 0;
			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
				irq = irq_find_mapping(pp->irq_domain,
						i * 32 + pos);
				dw_pcie_wr_own_conf(pp,
						PCIE_MSI_INTR0_STATUS + i * 12,
						4, 1 << pos);
				generic_handle_irq(irq);
				pos++;
			}
		}
	}

	return ret;
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			virt_to_phys((void *)pp->msi_data));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
}

static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}

static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

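/*
 * Allocate a contiguous, power-of-two sized block of MSI vectors from the
 * port's bitmap, attach the msi_desc to each mapped Linux IRQ in the block
 * and unmask the corresponding bits in the MSI controller.
 */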
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);

	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
			struct msi_desc *desc)
{
	int irq, pos;
	struct msi_msg msg;
	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	if (pp->ops->get_msi_addr)
		msg.address_lo = pp->ops->get_msi_addr(pp);
	else
		msg.address_lo = virt_to_phys((void *)pp->msi_data);
	msg.address_hi = 0x0;

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	write_msi_msg(irq, &msg);

	return 0;
}

static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi(data);
	struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);

	clear_irq_range(pp, irq, 1, data->hwirq);
}

static struct msi_chip dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.teardown_irq = dw_msi_teardown_irq,
};

int dw_pcie_link_up(struct pcie_port *pp)
{
	if (pp->ops->link_up)
		return pp->ops->link_up(pp);
	else
		return 0;
}

static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

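/*
 * Parse the host bridge resources from DT (config space, I/O, memory and
 * bus ranges), map the DBI and configuration windows, set up the MSI IRQ
 * domain and register the root complex with the ARM PCI core via
 * pci_common_init_dev().  Platform glue drivers typically call this from
 * their probe path after filling in struct pcie_port and its ops.
 */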
int __init dw_pcie_host_init(struct pcie_port *pp)
{
	struct device_node *np = pp->dev->of_node;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	struct resource *cfg_res;
	u32 val, na, ns;
	const __be32 *addrp;
	int i, index, ret;

	/* Find the address cell size and the number of cells in order to get
	 * the untranslated address.
	 */
	of_property_read_u32(np, "#address-cells", &na);
	ns = of_n_size_cells(np);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res)/2;
		pp->cfg1_size = resource_size(cfg_res)/2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;

		/* Find the untranslated configuration space address */
		index = of_property_match_string(np, "reg-names", "config");
		addrp = of_get_address(np, index, NULL, NULL);
		pp->cfg0_mod_base = of_read_number(addrp, ns);
		pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
	} else {
		dev_err(pp->dev, "missing *config* reg space\n");
	}

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pp->dev, "missing ranges property\n");
		return -EINVAL;
	}

	/* Get the I/O and memory ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;

		if (restype == IORESOURCE_IO) {
			of_pci_range_to_resource(&range, np, &pp->io);
			pp->io.name = "I/O";
			pp->io.start = max_t(resource_size_t,
					     PCIBIOS_MIN_IO,
					     range.pci_addr + global_io_offset);
			pp->io.end = min_t(resource_size_t,
					   IO_SPACE_LIMIT,
					   range.pci_addr + range.size
					   + global_io_offset - 1);
			pp->io_size = resource_size(&pp->io);
			pp->io_bus_addr = range.pci_addr;
			pp->io_base = range.cpu_addr;

			/* Find the untranslated IO space address */
			pp->io_mod_base = of_read_number(parser.range -
							 parser.np + na, ns);
		}
		if (restype == IORESOURCE_MEM) {
			of_pci_range_to_resource(&range, np, &pp->mem);
			pp->mem.name = "MEM";
			pp->mem_size = resource_size(&pp->mem);
			pp->mem_bus_addr = range.pci_addr;

			/* Find the untranslated MEM space address */
			pp->mem_mod_base = of_read_number(parser.range -
							  parser.np + na, ns);
		}
		if (restype == 0) {
			of_pci_range_to_resource(&range, np, &pp->cfg);
			pp->cfg0_size = resource_size(&pp->cfg)/2;
			pp->cfg1_size = resource_size(&pp->cfg)/2;
			pp->cfg0_base = pp->cfg.start;
			pp->cfg1_base = pp->cfg.start + pp->cfg0_size;

			/* Find the untranslated configuration space address */
			pp->cfg0_mod_base = of_read_number(parser.range -
							   parser.np + na, ns);
			pp->cfg1_mod_base = pp->cfg0_mod_base +
					    pp->cfg0_size;
		}
	}

	ret = of_pci_parse_bus_range(np, &pp->busn);
	if (ret < 0) {
		pp->busn.name = np->name;
		pp->busn.start = 0;
		pp->busn.end = 0xff;
		pp->busn.flags = IORESOURCE_BUS;
		dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
			ret, &pp->busn);
	}

	if (!pp->dbi_base) {
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
					resource_size(&pp->cfg));
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	pp->mem_base = pp->mem.start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(pp->dev, "error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
		dev_err(pp->dev, "Failed to parse the number of lanes\n");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(pp->dev, "irq domain init failed\n");
				return -ENXIO;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				return ret;
		}
	}

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);

	dw_pci.nr_controllers = 1;
	dw_pci.private_data = (void **)&pp;

	pci_common_init_dev(pp->dev, &dw_pci);
#ifdef CONFIG_PCI_DOMAINS
	dw_pci.domain++;
#endif

	return 0;
}

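/*
 * Only two outbound iATU viewports are used by this driver: index 0 is
 * shared between CFG0 and MEM accesses, index 1 between CFG1 and I/O.
 * Each helper below retargets a viewport by reprogramming its base,
 * limit, target and type registers.
 */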
static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
{
	/* Program viewport 0 : OUTBOUND : CFG0 */
	dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
			  PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
			  PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1);
	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}

static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
{
	/* Program viewport 1 : OUTBOUND : CFG1 */
	dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
			  PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
	dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
			  PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}

static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
{
	/* Program viewport 0 : OUTBOUND : MEM */
	dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
			  PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
	dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
			  PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
			  PCIE_ATU_UPPER_TARGET);
	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}

static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
{
	/* Program viewport 1 : OUTBOUND : IO */
	dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
			  PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
	dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
			  PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
			  PCIE_ATU_UPPER_TARGET);
	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}

static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 *val)
{
	int ret = PCIBIOS_SUCCESSFUL;
	u32 address, busdev;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
	address = where & ~0x3;

	if (bus->parent->number == pp->root_bus_nr) {
		dw_pcie_prog_viewport_cfg0(pp, busdev);
		ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size,
				val);
		dw_pcie_prog_viewport_mem_outbound(pp);
	} else {
		dw_pcie_prog_viewport_cfg1(pp, busdev);
		ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size,
				val);
		dw_pcie_prog_viewport_io_outbound(pp);
	}

	return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 val)
{
	int ret = PCIBIOS_SUCCESSFUL;
	u32 address, busdev;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
	address = where & ~0x3;

	if (bus->parent->number == pp->root_bus_nr) {
		dw_pcie_prog_viewport_cfg0(pp, busdev);
		ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size,
				val);
		dw_pcie_prog_viewport_mem_outbound(pp);
	} else {
		dw_pcie_prog_viewport_cfg1(pp, busdev);
		ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size,
				val);
		dw_pcie_prog_viewport_io_outbound(pp);
	}

	return ret;
}

static int dw_pcie_valid_config(struct pcie_port *pp,
				struct pci_bus *bus, int dev)
{
	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pp))
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	/*
	 * do not read more than one device on the bus directly attached
	 * to RC's (Virtual Bridge's) DS side.
	 */
	if (bus->primary == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			int size, u32 *val)
{
	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
	int ret;

	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number != pp->root_bus_nr)
		if (pp->ops->rd_other_conf)
			ret = pp->ops->rd_other_conf(pp, bus, devfn,
						where, size, val);
		else
			ret = dw_pcie_rd_other_conf(pp, bus, devfn,
						where, size, val);
	else
		ret = dw_pcie_rd_own_conf(pp, where, size, val);

	return ret;
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			int where, int size, u32 val)
{
	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
	int ret;

	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number != pp->root_bus_nr)
		if (pp->ops->wr_other_conf)
			ret = pp->ops->wr_other_conf(pp, bus, devfn,
						where, size, val);
		else
			ret = dw_pcie_wr_other_conf(pp, bus, devfn,
						where, size, val);
	else
		ret = dw_pcie_wr_own_conf(pp, where, size, val);

	return ret;
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct pcie_port *pp;

	pp = sys_to_pcie(sys);

	if (global_io_offset < SZ_1M && pp->io_size > 0) {
		sys->io_offset = global_io_offset - pp->io_bus_addr;
		pci_ioremap_io(global_io_offset, pp->io_base);
		global_io_offset += SZ_64K;
		pci_add_resource_offset(&sys->resources, &pp->io,
					sys->io_offset);
	}

	sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
	pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
	pci_add_resource(&sys->resources, &pp->busn);

	return 1;
}

static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct pci_bus *bus;
	struct pcie_port *pp = sys_to_pcie(sys);

	pp->root_bus_nr = sys->busnr;
	bus = pci_create_root_bus(pp->dev, sys->busnr,
				  &dw_pcie_ops, sys, &sys->resources);
	if (!bus)
		return NULL;

	pci_scan_child_bus(bus);

	if (bus && pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	return bus;
}

static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
	int irq;

	irq = of_irq_parse_and_map_pci(dev, slot, pin);
	if (!irq)
		irq = pp->irq;

	return irq;
}

static void dw_pcie_add_bus(struct pci_bus *bus)
{
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct pcie_port *pp = sys_to_pcie(bus->sysdata);

		dw_pcie_msi_chip.dev = pp->dev;
		bus->msi = &dw_pcie_msi_chip;
	}
}

static struct hw_pci dw_pci = {
	.setup		= dw_pcie_setup,
	.scan		= dw_pcie_scan_bus,
	.map_irq	= dw_pcie_map_irq,
	.add_bus	= dw_pcie_add_bus,
};

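/*
 * Basic root complex setup: program the link width, RC BARs, interrupt
 * pin, bus numbers, the memory base/limit window and the command register
 * in the RC's own configuration space.
 */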
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	u32 membase;
	u32 memlimit;

	/* set the number of lanes */
	dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
	val &= ~PORT_LINK_MODE_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);

	/* set link width speed control register */
	dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);

	/* setup RC BARs */
	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);

	/* setup interrupt pins */
	dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);

	/* setup bus numbers */
	dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);

	/* setup memory base, memory limit */
	membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
	memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
	val = memlimit | membase;
	dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);

	/* setup command register */
	dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_rc(pp, val, PCI_COMMAND);
}

MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Synopsys DesignWare PCIe host controller driver");
MODULE_LICENSE("GPL v2");