/*
 * Synopsys Designware PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL		0x710
#define PORT_LINK_MODE_MASK		(0x3f << 16)
#define PORT_LINK_MODE_1_LANES		(0x1 << 16)
#define PORT_LINK_MODE_2_LANES		(0x3 << 16)
#define PORT_LINK_MODE_4_LANES		(0x7 << 16)
#define PORT_LINK_MODE_8_LANES		(0xf << 16)

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK	(0x1f << 8)
#define PORT_LOGIC_LINK_WIDTH_1_LANES	(0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES	(0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES	(0x4 << 8)
#define PORT_LOGIC_LINK_WIDTH_8_LANES	(0x8 << 8)

#define PCIE_MSI_ADDR_LO		0x820
#define PCIE_MSI_ADDR_HI		0x824
#define PCIE_MSI_INTR0_ENABLE		0x828
#define PCIE_MSI_INTR0_MASK		0x82C
#define PCIE_MSI_INTR0_STATUS		0x830

#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

static struct hw_pci dw_pci;

static unsigned long global_io_offset;

static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
	BUG_ON(!sys->private_data);

	return sys->private_data;
}

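/*
 * Size- and alignment-checked accessors for config space and other
 * memory-mapped registers.  An unaligned or unsupported access width is
 * rejected with PCIBIOS_BAD_REGISTER_NUMBER instead of being performed.
 */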
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4)
		*val = readl(addr);
	else if (size == 2)
		*val = readw(addr);
	else if (size == 1)
		*val = readb(addr);
	else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
{
	if (pp->ops->readl_rc)
		pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
	else
		*val = readl(pp->dbi_base + reg);
}

static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
{
	if (pp->ops->writel_rc)
		pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
	else
		writel(val, pp->dbi_base + reg);
}

static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	int ret;

	if (pp->ops->rd_own_conf)
		ret = pp->ops->rd_own_conf(pp, where, size, val);
	else
		ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);

	return ret;
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	int ret;

	if (pp->ops->wr_own_conf)
		ret = pp->ops->wr_own_conf(pp, where, size, val);
	else
		ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);

	return ret;
}

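/*
 * Program one outbound iATU region through the viewport registers: CPU
 * addresses in [cpu_addr, cpu_addr + size - 1] are translated to pci_addr
 * with the requested TLP type (MEM, IO, CFG0 or CFG1).
 */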
static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
		int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
	dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
			  PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
			  PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
	dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}

static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	unsigned long val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				(u32 *)&val);
		if (val) {
			ret = IRQ_HANDLED;
			pos = 0;
			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
				irq = irq_find_mapping(pp->irq_domain,
						i * 32 + pos);
				dw_pcie_wr_own_conf(pp,
						PCIE_MSI_INTR0_STATUS + i * 12,
						4, 1 << pos);
				generic_handle_irq(irq);
				pos++;
			}
		}
	}

	return ret;
}

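/*
 * Allocate a page and program its physical address into
 * PCIE_MSI_ADDR_LO/HI, giving the core a target address at which endpoint
 * MSI writes are caught and turned into interrupts.
 */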
void dw_pcie_msi_init(struct pcie_port *pp)
{
	u64 msi_target;

	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
	msi_target = virt_to_phys((void *)pp->msi_data);

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
}

static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}

static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

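/*
 * Reserve a naturally aligned, power-of-two sized block of MSI vectors in
 * the allocation bitmap and enable each of them, either through the
 * platform's msi_set_irq callback or in the core's MSI enable registers.
 */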
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(desc));

	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

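/*
 * Compose and write the MSI message for a vector: the address is the MSI
 * target (from get_msi_addr if provided, otherwise the page allocated in
 * dw_pcie_msi_init) and the data is the vector position, unless the
 * platform supplies get_msi_data.
 */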
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = virt_to_phys((void *)pp->msi_data);

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	pci_write_msi_msg(irq, &msg);
}

static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			    struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}

static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));

	clear_irq_range(pp, irq, 1, data->hwirq);
}

static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

int dw_pcie_link_up(struct pcie_port *pp)
{
	if (pp->ops->link_up)
		return pp->ops->link_up(pp);
	else
		return 0;
}

static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

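/*
 * Parse the host bridge resources from DT, map the DBI and configuration
 * windows, set up the MSI irq domain, apply the core RC configuration and
 * register the bridge through pci_common_init_dev().
 *
 * A SoC front end typically fills in struct pcie_port from its probe
 * routine and then calls this function.  Minimal illustrative sketch only;
 * the foo_* names below are hypothetical and not part of this file:
 *
 *	static int foo_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct foo_pcie *foo;
 *		struct pcie_port *pp;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		pp = &foo->pp;
 *		pp->dev = &pdev->dev;
 *		pp->dbi_base = foo_map_dbi_registers(pdev);
 *		pp->root_bus_nr = -1;
 *		pp->ops = &foo_pcie_host_ops;
 *
 *		return dw_pcie_host_init(pp);
 *	}
 */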
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct device_node *np = pp->dev->of_node;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct resource *cfg_res;
	u32 val;
	int i, ret;
	LIST_HEAD(res);
	struct resource_entry *win;

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res)/2;
		pp->cfg1_size = resource_size(cfg_res)/2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(pp->dev, "missing *config* reg space\n");
	}

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io = win->res;
			pp->io->name = "I/O";
			pp->io_size = resource_size(pp->io);
			pp->io_bus_addr = pp->io->start - win->offset;
			pp->io->start = max_t(resource_size_t, PCIBIOS_MIN_IO,
					      pp->io_bus_addr +
					      global_io_offset);
			pp->io->end = min_t(resource_size_t, IO_SPACE_LIMIT,
					    pp->io_bus_addr + pp->io_size +
					    global_io_offset - 1);
			pp->io_base = pp->io->start;
			pp->io_base_tmp = pp->io->start;
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg)/2;
			pp->cfg1_size = resource_size(pp->cfg)/2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		default:
			continue;
		}
	}

	if (!pp->dbi_base) {
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
					    resource_size(pp->cfg));
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(pp->dev, "error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
	if (ret)
		pp->lanes = 0;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(pp->dev, "irq domain init failed\n");
				return -ENXIO;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				return ret;
		}
	}

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	if (!pp->ops->rd_other_conf)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);

#ifdef CONFIG_PCI_MSI
	dw_pcie_msi_chip.dev = pp->dev;
#endif

	dw_pci.nr_controllers = 1;
	dw_pci.private_data = (void **)&pp;

	pci_common_init_dev(pp->dev, &dw_pci);

	return 0;
}

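/*
 * Config accesses to devices below the root bus reuse outbound iATU
 * index 0: it is retargeted at CFG0 space for the bus directly below the
 * RC or CFG1 space for buses further out, the access goes through the
 * mapped window, and the region is then restored to I/O translation.
 */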
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_IO, pp->io_base,
				  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_IO, pp->io_base,
				  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_valid_config(struct pcie_port *pp,
				struct pci_bus *bus, int dev)
{
	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pp))
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	/*
	 * do not read more than one device on the bus directly attached
	 * to RC's (Virtual Bridge's) DS side.
	 */
	if (bus->primary == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
	int ret;

	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number != pp->root_bus_nr)
		if (pp->ops->rd_other_conf)
			ret = pp->ops->rd_other_conf(pp, bus, devfn,
						     where, size, val);
		else
			ret = dw_pcie_rd_other_conf(pp, bus, devfn,
						    where, size, val);
	else
		ret = dw_pcie_rd_own_conf(pp, where, size, val);

	return ret;
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
	int ret;

	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number != pp->root_bus_nr)
		if (pp->ops->wr_other_conf)
			ret = pp->ops->wr_other_conf(pp, bus, devfn,
						     where, size, val);
		else
			ret = dw_pcie_wr_other_conf(pp, bus, devfn,
						    where, size, val);
	else
		ret = dw_pcie_wr_own_conf(pp, where, size, val);

	return ret;
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct pcie_port *pp;

	pp = sys_to_pcie(sys);

	if (global_io_offset < SZ_1M && pp->io_size > 0) {
		sys->io_offset = global_io_offset - pp->io_bus_addr;
		pci_ioremap_io(global_io_offset, pp->io_base_tmp);
		global_io_offset += SZ_64K;
		pci_add_resource_offset(&sys->resources, pp->io,
					sys->io_offset);
	}

	sys->mem_offset = pp->mem->start - pp->mem_bus_addr;
	pci_add_resource_offset(&sys->resources, pp->mem, sys->mem_offset);
	pci_add_resource(&sys->resources, pp->busn);

	return 1;
}

static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct pci_bus *bus;
	struct pcie_port *pp = sys_to_pcie(sys);

	pp->root_bus_nr = sys->busnr;

	if (IS_ENABLED(CONFIG_PCI_MSI))
		bus = pci_scan_root_bus_msi(pp->dev, sys->busnr, &dw_pcie_ops,
					    sys, &sys->resources,
					    &dw_pcie_msi_chip);
	else
		bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
					sys, &sys->resources);

	if (!bus)
		return NULL;

	if (bus && pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	return bus;
}

static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
	int irq;

	irq = of_irq_parse_and_map_pci(dev, slot, pin);
	if (!irq)
		irq = pp->irq;

	return irq;
}

static struct hw_pci dw_pci = {
	.setup		= dw_pcie_setup,
	.scan		= dw_pcie_scan_bus,
	.map_irq	= dw_pcie_map_irq,
};

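/*
 * Basic Root Complex setup through the DBI space: lane count and link
 * width, RC BARs, interrupt pin, primary/secondary bus numbers, the memory
 * base/limit window and the command register.
 */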
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	u32 membase;
	u32 memlimit;

	/* set the number of lanes */
	dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
	val &= ~PORT_LINK_MODE_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
		return;
	}
	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);

	/* set link width speed control register */
	dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);

	/* setup RC BARs */
	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);

	/* setup interrupt pins */
	dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);

	/* setup bus numbers */
	dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);

	/* setup memory base, memory limit */
	membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
	memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
	val = memlimit | membase;
	dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);

	/* setup command register */
	dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_rc(pp, val, PCI_COMMAND);
}

MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Designware PCIe host controller driver");
MODULE_LICENSE("GPL v2");