blob: a28c496f58ac384f8449ee4af6e694450f7a5666 [file] [log] [blame]
Bjorn Helgaas8cfab3c2018-01-26 12:50:27 -06001// SPDX-License-Identifier: GPL-2.0
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +05302/*
Bjorn Helgaas96291d52017-09-01 16:35:50 -05003 * Synopsys DesignWare PCIe host controller driver
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +05304 *
5 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com
7 *
8 * Author: Jingoo Han <jg1.han@samsung.com>
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +05309 */
10
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +000011#include <linux/irqchip/chained_irq.h>
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +053012#include <linux/irqdomain.h>
13#include <linux/of_address.h>
14#include <linux/of_pci.h>
15#include <linux/pci_regs.h>
16#include <linux/platform_device.h>
17
18#include "pcie-designware.h"
19
20static struct pci_ops dw_pcie_ops;
21
22static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
23 u32 *val)
24{
25 struct dw_pcie *pci;
26
27 if (pp->ops->rd_own_conf)
28 return pp->ops->rd_own_conf(pp, where, size, val);
29
30 pci = to_dw_pcie_from_pp(pp);
31 return dw_pcie_read(pci->dbi_base + where, size, val);
32}
33
34static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
35 u32 val)
36{
37 struct dw_pcie *pci;
38
39 if (pp->ops->wr_own_conf)
40 return pp->ops->wr_own_conf(pp, where, size, val);
41
42 pci = to_dw_pcie_from_pp(pp);
43 return dw_pcie_write(pci->dbi_base + where, size, val);
44}
45
/* irq_chip for the legacy msi_controller path: defers to the PCI MSI core. */
static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
53
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +000054static void dw_msi_ack_irq(struct irq_data *d)
55{
56 irq_chip_ack_parent(d);
57}
58
/* Mask an MSI both at the PCI device and in the parent (DWC) irq_chip. */
static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
64
/* Unmask an MSI both at the PCI device and in the parent (DWC) irq_chip. */
static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
70
/* Top-level irq_chip for the hierarchical MSI domain (wraps the parent chip). */
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};
77
/* MSI domain capabilities: MSI-X and multi-vector MSI are both advertised. */
static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};
83
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +053084/* MSI int handler */
85irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
86{
Dan Carpenter1b497e62017-03-16 14:34:51 -050087 u32 val;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +053088 int i, pos, irq;
89 irqreturn_t ret = IRQ_NONE;
90
91 for (i = 0; i < MAX_MSI_CTRLS; i++) {
92 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
Dan Carpenter1b497e62017-03-16 14:34:51 -050093 &val);
Bjorn Helgaasdbe4a092017-03-16 14:34:59 -050094 if (!val)
95 continue;
96
97 ret = IRQ_HANDLED;
98 pos = 0;
Dan Carpenter1b497e62017-03-16 14:34:51 -050099 while ((pos = find_next_bit((unsigned long *) &val, 32,
100 pos)) != 32) {
Bjorn Helgaasdbe4a092017-03-16 14:34:59 -0500101 irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
Faiz Abbas8c934092017-08-10 16:54:55 +0530102 generic_handle_irq(irq);
Bjorn Helgaasdbe4a092017-03-16 14:34:59 -0500103 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
104 4, 1 << pos);
Bjorn Helgaasdbe4a092017-03-16 14:34:59 -0500105 pos++;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530106 }
107 }
108
109 return ret;
110}
111
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000112/* Chained MSI interrupt service routine */
113static void dw_chained_msi_isr(struct irq_desc *desc)
114{
115 struct irq_chip *chip = irq_desc_get_chip(desc);
116 struct pcie_port *pp;
117
118 chained_irq_enter(chip, desc);
119
120 pp = irq_desc_get_handler_data(desc);
121 dw_handle_msi_irq(pp);
122
123 chained_irq_exit(chip, desc);
124}
125
126static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
127{
128 struct pcie_port *pp = irq_data_get_irq_chip_data(data);
129 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
130 u64 msi_target;
131
132 if (pp->ops->get_msi_addr)
133 msi_target = pp->ops->get_msi_addr(pp);
134 else
135 msi_target = (u64)pp->msi_data;
136
137 msg->address_lo = lower_32_bits(msi_target);
138 msg->address_hi = upper_32_bits(msi_target);
139
140 if (pp->ops->get_msi_data)
141 msg->data = pp->ops->get_msi_data(pp, data->hwirq);
142 else
143 msg->data = data->hwirq;
144
145 dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
146 (int)data->hwirq, msg->address_hi, msg->address_lo);
147}
148
/* MSI affinity is fixed by the hardware doorbell; reject all changes. */
static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
154
/*
 * Mask one MSI vector in the controller, under pp->lock so the cached
 * irq_status[] shadow and the ENABLE register stay in sync.
 */
static void dw_pci_bottom_mask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	if (pp->ops->msi_clear_irq) {
		/* Platform provides its own vector-disable mechanism. */
		pp->ops->msi_clear_irq(pp, data->hwirq);
	} else {
		ctrl = data->hwirq / 32;	/* controller bank */
		res = ctrl * 12;		/* 12-byte register stride */
		bit = data->hwirq % 32;		/* bit within the bank */

		pp->irq_status[ctrl] &= ~(1 << bit);
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
				    pp->irq_status[ctrl]);
	}

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
177
/*
 * Unmask one MSI vector in the controller, under pp->lock so the cached
 * irq_status[] shadow and the ENABLE register stay in sync.
 */
static void dw_pci_bottom_unmask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	if (pp->ops->msi_set_irq) {
		/* Platform provides its own vector-enable mechanism. */
		pp->ops->msi_set_irq(pp, data->hwirq);
	} else {
		ctrl = data->hwirq / 32;	/* controller bank */
		res = ctrl * 12;		/* 12-byte register stride */
		bit = data->hwirq % 32;		/* bit within the bank */

		pp->irq_status[ctrl] |= 1 << bit;
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
				    pp->irq_status[ctrl]);
	}

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
200
/*
 * Ack one MSI vector. Only platforms providing msi_irq_ack do anything here;
 * the default path clears status from dw_handle_msi_irq() instead.
 */
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct msi_desc *msi = irq_data_get_msi_desc(d);
	struct pcie_port *pp;

	pp = msi_desc_to_pci_sysdata(msi);

	if (pp->ops->msi_irq_ack)
		pp->ops->msi_irq_ack(d->hwirq, pp);
}
211
/* Bottom (parent) irq_chip: talks directly to the DWC MSI controller regs. */
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
220
/*
 * Allocate a power-of-two-aligned contiguous range of MSI hwirqs from the
 * in-use bitmap and bind each virq to the bottom irq_chip.
 *
 * Returns 0 on success, -ENOSPC if no suitably aligned free region exists.
 */
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	/* Multi-MSI requires a naturally aligned power-of-two block. */
	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    &dw_pci_msi_bottom_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}
248
/* Return a previously allocated hwirq region to the in-use bitmap. */
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);
	/* data->hwirq is the first bit of the region handed out by alloc. */
	bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));
	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
261
/* Hierarchical-domain ops for the bottom (hwirq) MSI domain. */
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
266
/*
 * Create the two-level MSI domain hierarchy: a linear hwirq domain of
 * pp->num_vectors entries with a PCI MSI domain stacked on top.
 *
 * Returns 0 on success, -ENOMEM if either domain cannot be created
 * (the hwirq domain is torn down if only the MSI domain fails).
 */
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
					       &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}
290
/*
 * Undo MSI setup: detach the chained handler from the parent interrupt and
 * remove both domains (MSI domain first, then the underlying hwirq domain).
 */
void dw_pcie_free_msi(struct pcie_port *pp)
{
	irq_set_chained_handler(pp->msi_irq, NULL);
	irq_set_handler_data(pp->msi_irq, NULL);

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}
299
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530300void dw_pcie_msi_init(struct pcie_port *pp)
301{
Niklas Cassel111111a2017-12-20 00:29:22 +0100302 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
303 struct device *dev = pci->dev;
304 struct page *page;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530305 u64 msi_target;
306
Niklas Cassel111111a2017-12-20 00:29:22 +0100307 page = alloc_page(GFP_KERNEL);
308 pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
309 if (dma_mapping_error(dev, pp->msi_data)) {
310 dev_err(dev, "failed to map MSI data\n");
311 __free_page(page);
312 return;
313 }
314 msi_target = (u64)pp->msi_data;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530315
316 /* program the msi_data */
317 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000318 lower_32_bits(msi_target));
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530319 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000320 upper_32_bits(msi_target));
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530321}
322
323static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
324{
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000325 unsigned int res, bit, ctrl;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530326
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000327 ctrl = irq / 32;
328 res = ctrl * 12;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530329 bit = irq % 32;
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000330 pp->irq_status[ctrl] &= ~(1 << bit);
331 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
332 pp->irq_status[ctrl]);
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530333}
334
/*
 * Tear down a contiguous range of assigned MSI vectors: unbind each virq's
 * msi_desc, disable the vector in the controller, then release the hwirq
 * region back to the allocation bitmap.
 */
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}
351
352static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
353{
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000354 unsigned int res, bit, ctrl;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530355
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000356 ctrl = irq / 32;
357 res = ctrl * 12;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530358 bit = irq % 32;
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000359 pp->irq_status[ctrl] |= 1 << bit;
360 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
361 pp->irq_status[ctrl]);
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530362}
363
/*
 * Legacy msi_controller allocation: reserve a power-of-two block of hwirqs,
 * bind msi_desc to each virq in the block, and enable the vectors.
 *
 * On success, returns the first virq and stores the first hwirq in *pos;
 * on failure, returns -ENOSPC (with *pos holding the bitmap result).
 */
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp;

	pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			/* Roll back the vectors bound so far. */
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/*Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}
408
409static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
410{
411 struct msi_msg msg;
412 u64 msi_target;
413
414 if (pp->ops->get_msi_addr)
415 msi_target = pp->ops->get_msi_addr(pp);
416 else
Niklas Cassel111111a2017-12-20 00:29:22 +0100417 msi_target = (u64)pp->msi_data;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530418
419 msg.address_lo = (u32)(msi_target & 0xffffffff);
420 msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
421
422 if (pp->ops->get_msi_data)
423 msg.data = pp->ops->get_msi_data(pp, pos);
424 else
425 msg.data = pos;
426
427 pci_write_msi_msg(irq, &msg);
428}
429
/*
 * msi_controller->setup_irq: allocate and program a single MSI vector.
 * MSI-X descriptors are rejected (not supported on this legacy path).
 */
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			    struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = pdev->bus->sysdata;

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}
447
/*
 * msi_controller->setup_irqs: allocate and program @nvec multi-MSI vectors
 * for the device's single msi_desc. MSI-X is rejected; without
 * CONFIG_PCI_MSI the whole operation is unsupported.
 */
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	/* Multi-MSI uses exactly one descriptor for all vectors. */
	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}
474
/* msi_controller->teardown_irq: release the single vector bound to @irq. */
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);

	/* data->hwirq is the bitmap position originally handed out. */
	clear_irq_range(pp, irq, 1, data->hwirq);
}
483
/* Legacy msi_controller handed to platforms implementing msi_host_init. */
static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};
489
/* Linear-domain map callback for the legacy path: wire virq to the chip. */
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
498
/* Domain ops for the legacy msi_controller IRQ domain. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};
502
503int dw_pcie_host_init(struct pcie_port *pp)
504{
505 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
506 struct device *dev = pci->dev;
507 struct device_node *np = dev->of_node;
508 struct platform_device *pdev = to_platform_device(dev);
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000509 struct resource_entry *win, *tmp;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530510 struct pci_bus *bus, *child;
Lorenzo Pieralisi295aeb92017-06-28 15:13:56 -0500511 struct pci_host_bridge *bridge;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530512 struct resource *cfg_res;
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000513 int ret;
514
515 raw_spin_lock_init(&pci->pp.lock);
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530516
517 cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
518 if (cfg_res) {
519 pp->cfg0_size = resource_size(cfg_res) / 2;
520 pp->cfg1_size = resource_size(cfg_res) / 2;
521 pp->cfg0_base = cfg_res->start;
522 pp->cfg1_base = cfg_res->start + pp->cfg0_size;
523 } else if (!pp->va_cfg0_base) {
524 dev_err(dev, "missing *config* reg space\n");
525 }
526
Lorenzo Pieralisi295aeb92017-06-28 15:13:56 -0500527 bridge = pci_alloc_host_bridge(0);
528 if (!bridge)
529 return -ENOMEM;
530
531 ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
532 &bridge->windows, &pp->io_base);
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530533 if (ret)
534 return ret;
535
Lorenzo Pieralisi295aeb92017-06-28 15:13:56 -0500536 ret = devm_request_pci_bus_resources(dev, &bridge->windows);
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530537 if (ret)
538 goto error;
539
540 /* Get the I/O and memory ranges from DT */
Lorenzo Pieralisi295aeb92017-06-28 15:13:56 -0500541 resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530542 switch (resource_type(win->res)) {
543 case IORESOURCE_IO:
544 ret = pci_remap_iospace(win->res, pp->io_base);
545 if (ret) {
546 dev_warn(dev, "error %d: failed to map resource %pR\n",
547 ret, win->res);
548 resource_list_destroy_entry(win);
549 } else {
550 pp->io = win->res;
551 pp->io->name = "I/O";
552 pp->io_size = resource_size(pp->io);
553 pp->io_bus_addr = pp->io->start - win->offset;
554 }
555 break;
556 case IORESOURCE_MEM:
557 pp->mem = win->res;
558 pp->mem->name = "MEM";
559 pp->mem_size = resource_size(pp->mem);
560 pp->mem_bus_addr = pp->mem->start - win->offset;
561 break;
562 case 0:
563 pp->cfg = win->res;
564 pp->cfg0_size = resource_size(pp->cfg) / 2;
565 pp->cfg1_size = resource_size(pp->cfg) / 2;
566 pp->cfg0_base = pp->cfg->start;
567 pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
568 break;
569 case IORESOURCE_BUS:
570 pp->busn = win->res;
571 break;
572 }
573 }
574
575 if (!pci->dbi_base) {
Lorenzo Pieralisicc7b0d42017-04-19 17:49:03 +0100576 pci->dbi_base = devm_pci_remap_cfgspace(dev,
577 pp->cfg->start,
578 resource_size(pp->cfg));
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530579 if (!pci->dbi_base) {
580 dev_err(dev, "error with ioremap\n");
581 ret = -ENOMEM;
582 goto error;
583 }
584 }
585
586 pp->mem_base = pp->mem->start;
587
588 if (!pp->va_cfg0_base) {
Lorenzo Pieralisicc7b0d42017-04-19 17:49:03 +0100589 pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
590 pp->cfg0_base, pp->cfg0_size);
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530591 if (!pp->va_cfg0_base) {
592 dev_err(dev, "error with ioremap in function\n");
593 ret = -ENOMEM;
594 goto error;
595 }
596 }
597
598 if (!pp->va_cfg1_base) {
Lorenzo Pieralisicc7b0d42017-04-19 17:49:03 +0100599 pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
600 pp->cfg1_base,
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530601 pp->cfg1_size);
602 if (!pp->va_cfg1_base) {
603 dev_err(dev, "error with ioremap\n");
604 ret = -ENOMEM;
605 goto error;
606 }
607 }
608
609 ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
610 if (ret)
611 pci->num_viewport = 2;
612
613 if (IS_ENABLED(CONFIG_PCI_MSI)) {
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000614 /*
615 * If a specific SoC driver needs to change the
616 * default number of vectors, it needs to implement
617 * the set_num_vectors callback.
618 */
619 if (!pp->ops->set_num_vectors) {
620 pp->num_vectors = MSI_DEF_NUM_VECTORS;
621 } else {
622 pp->ops->set_num_vectors(pp);
623
624 if (pp->num_vectors > MAX_MSI_IRQS ||
625 pp->num_vectors == 0) {
626 dev_err(dev,
627 "Invalid number of vectors\n");
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530628 goto error;
629 }
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000630 }
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530631
Gustavo Pimentel7c5925a2018-03-06 11:54:53 +0000632 if (!pp->ops->msi_host_init) {
633 ret = dw_pcie_allocate_domains(pp);
634 if (ret)
635 goto error;
636
637 if (pp->msi_irq)
638 irq_set_chained_handler_and_data(pp->msi_irq,
639 dw_chained_msi_isr,
640 pp);
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530641 } else {
642 ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
643 if (ret < 0)
644 goto error;
645 }
646 }
647
Bjorn Andersson4a301762017-07-15 23:39:45 -0700648 if (pp->ops->host_init) {
649 ret = pp->ops->host_init(pp);
650 if (ret)
651 goto error;
652 }
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530653
654 pp->root_bus_nr = pp->busn->start;
Lorenzo Pieralisi295aeb92017-06-28 15:13:56 -0500655
656 bridge->dev.parent = dev;
657 bridge->sysdata = pp;
658 bridge->busnr = pp->root_bus_nr;
659 bridge->ops = &dw_pcie_ops;
Lorenzo Pieralisi60eca192017-06-28 15:14:07 -0500660 bridge->map_irq = of_irq_parse_and_map_pci;
661 bridge->swizzle_irq = pci_common_swizzle;
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530662
Lorenzo Pieralisi295aeb92017-06-28 15:13:56 -0500663 ret = pci_scan_root_bus_bridge(bridge);
664 if (ret)
665 goto error;
666
667 bus = bridge->bus;
668
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530669 if (pp->ops->scan_bus)
670 pp->ops->scan_bus(pp);
671
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530672 pci_bus_size_bridges(bus);
673 pci_bus_assign_resources(bus);
674
675 list_for_each_entry(child, &bus->children, node)
676 pcie_bus_configure_settings(child);
677
678 pci_bus_add_devices(bus);
679 return 0;
680
681error:
Lorenzo Pieralisi295aeb92017-06-28 15:13:56 -0500682 pci_free_host_bridge(bridge);
Kishon Vijay Abraham Ifeb85d92017-02-15 18:48:17 +0530683 return ret;
684}
685
/*
 * Read config space of a device behind the root port.
 *
 * Temporarily retargets ATU viewport 1 at the device's config space
 * (CFG0 for the immediate child bus, CFG1 for anything deeper), performs
 * the read, then — when only 2 viewports exist — restores the viewport
 * to the I/O window it normally serves.
 */
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	/* Encode bus/device/function into the ATU target address. */
	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	/* With only 2 viewports, index 1 must be restored for I/O. */
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
724
/*
 * Write config space of a device behind the root port.
 *
 * Mirror image of dw_pcie_rd_other_conf(): retarget ATU viewport 1 at the
 * device's config space, perform the write, then restore the viewport to
 * the I/O window when only 2 viewports exist.
 */
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	/* Encode bus/device/function into the ATU target address. */
	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	/* With only 2 viewports, index 1 must be restored for I/O. */
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
763
764static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
765 int dev)
766{
767 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
768
769 /* If there is no link, then there is no device */
770 if (bus->number != pp->root_bus_nr) {
771 if (!dw_pcie_link_up(pci))
772 return 0;
773 }
774
775 /* access only one slot on each root port */
776 if (bus->number == pp->root_bus_nr && dev > 0)
777 return 0;
778
779 return 1;
780}
781
782static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
783 int size, u32 *val)
784{
785 struct pcie_port *pp = bus->sysdata;
786
787 if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
788 *val = 0xffffffff;
789 return PCIBIOS_DEVICE_NOT_FOUND;
790 }
791
792 if (bus->number == pp->root_bus_nr)
793 return dw_pcie_rd_own_conf(pp, where, size, val);
794
795 return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
796}
797
798static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
799 int where, int size, u32 val)
800{
801 struct pcie_port *pp = bus->sysdata;
802
803 if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
804 return PCIBIOS_DEVICE_NOT_FOUND;
805
806 if (bus->number == pp->root_bus_nr)
807 return dw_pcie_wr_own_conf(pp, where, size, val);
808
809 return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
810}
811
/* Config accessors installed on the host bridge. */
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
816
817static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
818{
819 u32 val;
820
821 val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
822 if (val == 0xffffffff)
823 return 1;
824
825 return 0;
826}
827
/*
 * Program the Root Complex: snapshot the MSI enable shadow, set up RC BARs,
 * interrupt pin, bus numbers, the command register, the outbound ATU
 * windows (unless the platform translates addresses itself), and the
 * bridge class, then request a link speed change.
 */
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * 12), 4,
				    &pp->irq_status[ctrl]);
	/* setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* setup interrupt pins */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;	/* interrupt pin = INTA */
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00010100;	/* primary 0, secondary 1, subordinate 1 */
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		/* A third viewport lets I/O keep a dedicated window. */
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
896}