/*
 * Synopsys Designware PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* Synopsys specific PCIE configuration registers */
#define PCIE_PORT_LINK_CONTROL		0x710
#define PORT_LINK_MODE_MASK		(0x3f << 16)
#define PORT_LINK_MODE_1_LANES		(0x1 << 16)
#define PORT_LINK_MODE_2_LANES		(0x3 << 16)
#define PORT_LINK_MODE_4_LANES		(0x7 << 16)
#define PORT_LINK_MODE_8_LANES		(0xf << 16)

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK	(0x1f << 8)
#define PORT_LOGIC_LINK_WIDTH_1_LANES	(0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES	(0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES	(0x4 << 8)
#define PORT_LOGIC_LINK_WIDTH_8_LANES	(0x8 << 8)

#define PCIE_MSI_ADDR_LO		0x820
#define PCIE_MSI_ADDR_HI		0x824
#define PCIE_MSI_INTR0_ENABLE		0x828
#define PCIE_MSI_INTR0_MASK		0x82C
#define PCIE_MSI_INTR0_STATUS		0x830

#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

static struct hw_pci dw_pci;

static unsigned long global_io_offset;

static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
	BUG_ON(!sys->private_data);

	return sys->private_data;
}

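/*
 * Generic config-space accessors: 1-, 2- or 4-byte reads and writes that
 * reject unaligned or unsupported access sizes.
 */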
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4)
		*val = readl(addr);
	else if (size == 2)
		*val = readw(addr);
	else if (size == 1)
		*val = readb(addr);
	else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

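/*
 * DBI register accessors; a host driver can override these through
 * pcie_host_ops when its DBI space needs special handling.
 */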
static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
{
	if (pp->ops->readl_rc)
		pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
	else
		*val = readl(pp->dbi_base + reg);
}

static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
{
	if (pp->ops->writel_rc)
		pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
	else
		writel(val, pp->dbi_base + reg);
}

static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	int ret;

	if (pp->ops->rd_own_conf)
		ret = pp->ops->rd_own_conf(pp, where, size, val);
	else
		ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);

	return ret;
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	int ret;

	if (pp->ops->wr_own_conf)
		ret = pp->ops->wr_own_conf(pp, where, size, val);
	else
		ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);

	return ret;
}

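/*
 * Program one outbound iATU region: select the region through the
 * viewport, set the CPU base/limit and PCI target addresses, then set
 * the TLP type and enable the region.
 */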
static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
		int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
	dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
			  PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
			  PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
	dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}

static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	unsigned long val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				(u32 *)&val);
		if (val) {
			ret = IRQ_HANDLED;
			pos = 0;
			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
				irq = irq_find_mapping(pp->irq_domain,
						i * 32 + pos);
				dw_pcie_wr_own_conf(pp,
						PCIE_MSI_INTR0_STATUS + i * 12,
						4, 1 << pos);
				generic_handle_irq(irq);
				pos++;
			}
		}
	}

	return ret;
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
	u64 msi_target;

	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
	msi_target = virt_to_phys((void *)pp->msi_data);

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
}

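/*
 * Each MSI controller bank handles 32 vectors and its registers are
 * spaced 12 bytes apart; a single vector is masked or unmasked by
 * toggling its bit in the matching INTR0_ENABLE register.
 */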
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}

static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

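/*
 * Reserve a naturally aligned block of MSI vectors in the allocation
 * bitmap, attach the msi_desc to each mapped IRQ and unmask the vectors
 * in the controller.  Returns the first Linux IRQ number on success.
 */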
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(desc));

	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = virt_to_phys((void *)pp->msi_data);

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	pci_write_msi_msg(irq, &msg);
}

static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}

static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));

	clear_irq_range(pp, irq, 1, data->hwirq);
}

static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

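/* Link state is platform specific; without a callback assume the link is down. */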
int dw_pcie_link_up(struct pcie_port *pp)
{
	if (pp->ops->link_up)
		return pp->ops->link_up(pp);
	else
		return 0;
}

static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

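/*
 * Parse the DT resources (config space, ranges, bus-range), map the DBI
 * and configuration windows, create the MSI IRQ domain and register the
 * host bridge through the ARM pci_common_init_dev() path.
 */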
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct device_node *np = pp->dev->of_node;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	struct resource *cfg_res;
	u32 val, na, ns;
	const __be32 *addrp;
	int i, index, ret;

	/*
	 * Find the address cell size and the number of cells in order to get
	 * the untranslated address.
	 */
	of_property_read_u32(np, "#address-cells", &na);
	ns = of_n_size_cells(np);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res)/2;
		pp->cfg1_size = resource_size(cfg_res)/2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;

		/* Find the untranslated configuration space address */
		index = of_property_match_string(np, "reg-names", "config");
		addrp = of_get_address(np, index, NULL, NULL);
		pp->cfg0_mod_base = of_read_number(addrp, ns);
		pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(pp->dev, "missing *config* reg space\n");
	}

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pp->dev, "missing ranges property\n");
		return -EINVAL;
	}

	/* Get the I/O and memory ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;

		if (restype == IORESOURCE_IO) {
			of_pci_range_to_resource(&range, np, &pp->io);
			pp->io.name = "I/O";
			pp->io.start = max_t(resource_size_t,
					     PCIBIOS_MIN_IO,
					     range.pci_addr + global_io_offset);
			pp->io.end = min_t(resource_size_t,
					   IO_SPACE_LIMIT,
					   range.pci_addr + range.size
					   + global_io_offset - 1);
			pp->io_size = resource_size(&pp->io);
			pp->io_bus_addr = range.pci_addr;
			pp->io_base = range.cpu_addr;

			/* Find the untranslated IO space address */
			pp->io_mod_base = of_read_number(parser.range -
							 parser.np + na, ns);
		}
		if (restype == IORESOURCE_MEM) {
			of_pci_range_to_resource(&range, np, &pp->mem);
			pp->mem.name = "MEM";
			pp->mem_size = resource_size(&pp->mem);
			pp->mem_bus_addr = range.pci_addr;

			/* Find the untranslated MEM space address */
			pp->mem_mod_base = of_read_number(parser.range -
							  parser.np + na, ns);
		}
		if (restype == 0) {
			of_pci_range_to_resource(&range, np, &pp->cfg);
			pp->cfg0_size = resource_size(&pp->cfg)/2;
			pp->cfg1_size = resource_size(&pp->cfg)/2;
			pp->cfg0_base = pp->cfg.start;
			pp->cfg1_base = pp->cfg.start + pp->cfg0_size;

			/* Find the untranslated configuration space address */
			pp->cfg0_mod_base = of_read_number(parser.range -
							   parser.np + na, ns);
			pp->cfg1_mod_base = pp->cfg0_mod_base +
					    pp->cfg0_size;
		}
	}

	ret = of_pci_parse_bus_range(np, &pp->busn);
	if (ret < 0) {
		pp->busn.name = np->name;
		pp->busn.start = 0;
		pp->busn.end = 0xff;
		pp->busn.flags = IORESOURCE_BUS;
		dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
			ret, &pp->busn);
	}

	if (!pp->dbi_base) {
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
					    resource_size(&pp->cfg));
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	pp->mem_base = pp->mem.start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(pp->dev, "error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
		dev_err(pp->dev, "Failed to parse the number of lanes\n");
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(pp->dev, "irq domain init failed\n");
				return -ENXIO;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				return ret;
		}
	}

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	if (!pp->ops->rd_other_conf)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_MEM, pp->mem_mod_base,
					  pp->mem_bus_addr, pp->mem_size);

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);

#ifdef CONFIG_PCI_MSI
	dw_pcie_msi_chip.dev = pp->dev;
#endif

	dw_pci.nr_controllers = 1;
	dw_pci.private_data = (void **)&pp;

	pci_common_init_dev(pp->dev, &dw_pci);

	return 0;
}

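/*
 * Config accesses to devices below the root port reuse outbound iATU
 * index 0: the region is retargeted at CFG0/CFG1 for the access and then
 * restored to the I/O window afterwards.
 */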
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_mod_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_mod_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_IO, pp->io_mod_base,
				  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_mod_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_mod_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_IO, pp->io_mod_base,
				  pp->io_bus_addr, pp->io_size);

	return ret;
}

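/* Filter out config accesses that cannot reach a device. */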
static int dw_pcie_valid_config(struct pcie_port *pp,
				struct pci_bus *bus, int dev)
{
	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pp))
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	/*
	 * do not read more than one device on the bus directly attached
	 * to RC's (Virtual Bridge's) DS side.
	 */
	if (bus->primary == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			int size, u32 *val)
{
	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
	int ret;

	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number != pp->root_bus_nr)
		if (pp->ops->rd_other_conf)
			ret = pp->ops->rd_other_conf(pp, bus, devfn,
						     where, size, val);
		else
			ret = dw_pcie_rd_other_conf(pp, bus, devfn,
						    where, size, val);
	else
		ret = dw_pcie_rd_own_conf(pp, where, size, val);

	return ret;
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			int where, int size, u32 val)
{
	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
	int ret;

	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number != pp->root_bus_nr)
		if (pp->ops->wr_other_conf)
			ret = pp->ops->wr_other_conf(pp, bus, devfn,
						     where, size, val);
		else
			ret = dw_pcie_wr_other_conf(pp, bus, devfn,
						    where, size, val);
	else
		ret = dw_pcie_wr_own_conf(pp, where, size, val);

	return ret;
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

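/*
 * ARM bios32 hooks: register the I/O, memory and bus resources with the
 * PCI core, scan the root bus and map legacy interrupts.
 */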
static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct pcie_port *pp;

	pp = sys_to_pcie(sys);

	if (global_io_offset < SZ_1M && pp->io_size > 0) {
		sys->io_offset = global_io_offset - pp->io_bus_addr;
		pci_ioremap_io(global_io_offset, pp->io_base);
		global_io_offset += SZ_64K;
		pci_add_resource_offset(&sys->resources, &pp->io,
					sys->io_offset);
	}

	sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
	pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
	pci_add_resource(&sys->resources, &pp->busn);

	return 1;
}

static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct pci_bus *bus;
	struct pcie_port *pp = sys_to_pcie(sys);

	pp->root_bus_nr = sys->busnr;

	if (IS_ENABLED(CONFIG_PCI_MSI))
		bus = pci_scan_root_bus_msi(pp->dev, sys->busnr, &dw_pcie_ops,
					    sys, &sys->resources,
					    &dw_pcie_msi_chip);
	else
		bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
					sys, &sys->resources);

	if (!bus)
		return NULL;

	if (bus && pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	return bus;
}

static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
	int irq;

	irq = of_irq_parse_and_map_pci(dev, slot, pin);
	if (!irq)
		irq = pp->irq;

	return irq;
}

static struct hw_pci dw_pci = {
	.setup = dw_pcie_setup,
	.scan = dw_pcie_scan_bus,
	.map_irq = dw_pcie_map_irq,
};

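/*
 * Basic Root Complex bring-up: program the lane count and link width,
 * the RC BARs, interrupt pin, bus numbers, memory window and command
 * register.
 */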
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	u32 membase;
	u32 memlimit;

	/* set the number of lanes */
	dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
	val &= ~PORT_LINK_MODE_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);

	/* set link width speed control register */
	dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);

	/* setup RC BARs */
	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);

	/* setup interrupt pins */
	dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);

	/* setup bus numbers */
	dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);

	/* setup memory base, memory limit */
	membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
	memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
	val = memlimit | membase;
	dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);

	/* setup command register */
	dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_rc(pp, val, PCI_COMMAND);
}

MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Designware PCIe host controller driver");
MODULE_LICENSE("GPL v2");