/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>

#include "powernv.h"
#include "pci.h"

#define define_pe_printk_level(func, kern_level)			\
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...)	\
{									\
	struct va_format vaf;						\
	va_list args;							\
	char pfix[32];							\
	int r;								\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	if (pe->pdev)							\
		strlcpy(pfix, dev_name(&pe->pdev->dev),			\
			sizeof(pfix));					\
	else								\
		sprintf(pfix, "%04x:%02x ",				\
			pci_domain_nr(pe->pbus),			\
			pe->pbus->number);				\
	r = printk(kern_level "pci %s: [PE# %.3d] %pV",			\
		   pfix, pe->pe_number, &vaf);				\
									\
	va_end(args);							\
									\
	return r;							\
}									\

define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);

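/* Fetch the pci_dn associated with a PCI device via its device-tree node */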
static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
{
	struct device_node *np;

	np = pci_device_to_OF_node(dev);
	if (!np)
		return NULL;
	return PCI_DN(np);
}

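/* Allocate the next free PE number from the PHB's PE allocation bitmap */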
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while(test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}

/* Currently those 2 are only used when MSIs are enabled; this will change,
 * but in the meantime we need to protect them to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */

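/*
 * Program the PELT and the parents' PELT-V entries so the RID range owned
 * by this PE is routed to its PE number, fill the reverse RID map, and set
 * up the MVE on IODA1.
 */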
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch(count) {
		case 1:  bcomp = OpalPciBusAll;		break;
		case 2:  bcomp = OpalPciBus7Bits;	break;
		case 4:  bcomp = OpalPciBus6Bits;	break;
		case 8:  bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			pr_err("%s: Number of subordinate busses %d"
			       " unsupported\n",
			       pci_name(pe->pbus->self), count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Associate PE in PELT */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Add to all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}
	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type == PNV_PHB_IODA1) {
		pe->mve_number = pe->pe_number;
		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
				      pe->pe_number);
		if (rc) {
			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		} else {
			rc = opal_pci_set_mve_enable(phb->opal_id,
						     pe->mve_number, OPAL_ENABLE_MVE);
			if (rc) {
				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
				       rc, pe->mve_number);
				pe->mve_number = -1;
			}
		}
	} else if (phb->type == PNV_PHB_IODA2)
		pe->mve_number = 0;

	return 0;
}

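/* Insert the PE into the PHB's DMA list, kept sorted by descending DMA weight */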
static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}

static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10; 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}

#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure; both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyway
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */

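/*
 * Attach every device on the bus (and, for "bus all" PEs, on its subordinate
 * buses) to the given PE and accumulate its DMA weight.
 */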
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pnv_ioda_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pci_dev_get(dev);
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There are 2 types of PCI-bus-sensitive PEs: one that is comprised of a
 * single PCI bus, and another that contains the primary PCI bus and its
 * subordinate PCI devices and buses. The second type of PE is normally
 * originated by a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num;

	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA capable device exists
	 * below the bridge
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}

static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}

/*
 * Configure PEs so that the downstream PCI buses and devices get their
 * associated PE#. Unfortunately, we haven't figured out a way to identify
 * PLX bridges yet, so we simply put the PCI bus and the subordinate buses
 * behind the root port into PEs here. This scheme is expected to change as
 * soon as we can detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_PEs(hose->bus);
	}
}

static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *dev)
{
	/* We delay DMA setup after we have assigned all PE# */
}

static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, &pe->tce32_table);
		if (dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}

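/*
 * Flush the TCEs in the range [startp, endp] from the P7IOC-style (IODA1)
 * hardware TCE cache by writing to the software-invalidate register.
 */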
static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc = 128 << 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);
		inc = 16;
	} else {
		/* Default (older HW) */
		inc = 128;
	}

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		__raw_writeq(start, invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}

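/*
 * IODA2 (PHB3) TCE invalidation: the kill register is written with the PE
 * number and the DMA page indexes of the range rather than the TCE table
 * addresses.
 */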
static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
{
	unsigned long start, end, inc;
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;

	/* We'll invalidate DMA address in PE scope */
	start = 0x2ul << 60;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
	start |= (inc << 12);
	inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
	end |= (inc << 12);
	inc = (0x1ul << 12);
	mb();

	while (start <= end) {
		__raw_writeq(start, invalidate);
		start += inc;
	}
}

void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
				 u64 *startp, u64 *endp)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1)
		pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
	else
		pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
}

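/*
 * Set up a 32-bit TCE table covering "segs" 256MB DMA segments starting at
 * segment "base" for an IODA1 PE, and hook it up as the PE's iommu table.
 */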
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
				      struct pnv_ioda_pe *pe, unsigned int base,
				      unsigned int segs)
{

	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int i;
	int64_t rc;
	void *addr;

	/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later; it
	 * requires some added smarts with our get/put_tce implementation
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						pe->pe_number,
						base + i, 1,
						__pa(addr) + TCE32_TABLE_SIZE * i,
						TCE32_TABLE_SIZE, 0x1000);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table,"
			       " err %ld\n", rc);
			goto fail;
		}
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
				  base << 28);

	/* OPAL variant of P7IOC SW invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_busno = 0;
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
			       TCE_PCI_SWINV_PAIR;
	}
	iommu_init_table(tbl, phb->hose->node);

	if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
 fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}

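/*
 * IODA2 (PHB3) variant: give the PE a single TCE table covering the whole
 * 32-bit DMA space below the M32 window, mapped through one TVE.
 */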
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	void *addr;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int tce_table_size, end;
	int64_t rc;

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* The PE will reserve all possible 32-bit space */
	pe->tce32_seg = 0;
	end = (1 << ilog2(phb->ioda.m32_pci_base));
	tce_table_size = (end / 0x1000) * 8;
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		end);

	/* Allocate TCE table */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce_table_size));
	if (!tce_mem) {
		pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce_table_size);

	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for 32-bit DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
					pe->pe_number << 1, 1, __pa(addr),
					tce_table_size, 0x1000);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table,"
		       " err %ld\n", rc);
		goto fail;
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0);

	/* OPAL variant of PHB3 invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_busno = 0;
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
	iommu_init_table(tbl, phb->hose->node);

	if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
fail:
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce_table_size));
}

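/*
 * Distribute the PHB's 32-bit TCE segments across the DMA-capable PEs,
 * weighted by each PE's DMA weight on IODA1; on IODA2 each PE simply gets
 * the whole 32-bit DMA space.
 */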
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE# than segments available, hand out one
	 * per PE until we run out and let the rest fail. If not,
	 * then we assign at least one segment per PE, plus more based
	 * on the amount of devices under that PE
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments, hand them
	 * out one base segment plus any residual segments based on
	 * weight
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
	base = 0;
	list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
		if (!pe->dma_weight)
			continue;
		if (!remaining) {
			pe_warn(pe, "No DMA32 resources available\n");
			continue;
		}
		segs = 1;
		if (residual) {
			segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
			if (segs > remaining)
				segs = remaining;
		}

		/*
		 * For the IODA2-compliant PHB3, we needn't care about the
		 * weight. All of the available 32-bit DMA space will be
		 * assigned to the PE.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
				pe->dma_weight, segs);
			pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
		} else {
			pe_info(pe, "Assign DMA32 space\n");
			segs = 0;
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
		}

		remaining -= segs;
		base += segs;
	}
}

#ifdef CONFIG_PCI_MSI
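/*
 * On PHB3, MSIs need an extra OPAL EOI (opal_pci_msi_eoi) in addition to
 * the normal XICS EOI; this irq_eoi hook provides it.
 */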
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);
	int64_t rc;

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}

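/*
 * Bind an MSI hardware interrupt to the device's PE: assign the XIVE to
 * the PE, retrieve the MSI address/data from OPAL and, on PHB3, switch
 * the virq over to the PHB's MSI-aware irq_chip.
 */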
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	struct irq_data *idata;
	struct irq_chip *ichip;
	unsigned int xive_num = hwirq - phb->msi_base;
	uint64_t addr64;
	uint32_t addr32, data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = addr64 >> 32;
		msg->address_lo = addr64 & 0xfffffffful;
	} else {
		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = addr32;
	}
	msg->data = data;

	/*
	 * Change the IRQ chip for the MSI interrupts on PHB3.
	 * The corresponding IRQ chip should be populated for
	 * the first time.
	 */
	if (phb->type == PNV_PHB_IODA2) {
		if (!phb->ioda.irq_chip_init) {
			idata = irq_get_irq_data(virq);
			ichip = irq_data_get_irq_chip(idata);
			phb->ioda.irq_chip_init = 1;
			phb->ioda.irq_chip = *ichip;
			phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
		}

		irq_set_chip(virq, &phb->ioda.irq_chip);
	}

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}

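/*
 * Read the PHB's MSI range from the device tree and set up the MSI bitmap
 * and setup callback.
 */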
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */

/*
 * This function is supposed to be called on a per-PE basis, from top to
 * bottom, so the I/O or MMIO segments assigned to a parent PE can be
 * overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
				  struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus_region region;
	struct resource *res;
	int i, index;
	int rc;

	/*
	 * NOTE: We only care about PCI-bus based PEs for now. PCI-device
	 * based PEs, for example SRIOV-sensitive VFs, should be figured
	 * out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	pci_bus_for_each_resource(pe->pbus, res, i) {
		if (!res || !res->flags ||
		    res->start > res->end)
			continue;

		if (res->flags & IORESOURCE_IO) {
			region.start = res->start - phb->ioda.io_pci_base;
			region.end = res->end - phb->ioda.io_pci_base;
			index = region.start / phb->ioda.io_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.io_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping IO "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.io_segsize;
				index++;
			}
		} else if (res->flags & IORESOURCE_MEM) {
			region.start = res->start -
				       hose->pci_mem_offset -
				       phb->ioda.m32_pci_base;
			region.end = res->end -
				     hose->pci_mem_offset -
				     phb->ioda.m32_pci_base;
			index = region.start / phb->ioda.m32_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.m32_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping M32 "
					       "segment#%d to PE#%d",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.m32_segsize;
				index++;
			}
		}
	}
}

static void pnv_pci_ioda_setup_seg(void)
{
	struct pci_controller *tmp, *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			pnv_ioda_setup_pe_seg(hose, pe);
		}
	}
}

static void pnv_pci_ioda_setup_DMA(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_dma(hose->private_data);

		/* Mark the PHB initialization done */
		phb = hose->private_data;
		phb->initialized = 1;
	}
}

static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_seg();
	pnv_pci_ioda_setup_DMA();
}

/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return the I/O or M32 segment size for PE-sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. For that case, we
 * needn't enlarge the alignment, so that we can save some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/* We need to support prefetchable memory windows later */
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}

/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static int pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* The function is probably called while the PEs have
	 * not been created yet, for example during resource
	 * reassignment in the PCI probe period. We just skip
	 * the check if the PEs aren't ready.
	 */
	if (!phb->initialized)
		return 0;

	pdn = pnv_ioda_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return -EINVAL;

	return 0;
}

static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
			       u32 devfn)
{
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}

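/*
 * Probe and initialize one IODA PHB from its device-tree node: allocate the
 * pnv_phb and per-PE arrays, parse the M32/IO windows, set up MSI support
 * and hook the pcibios callbacks that build the PEs later during fixup.
 */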
void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
{
	struct pci_controller *hose;
	static int primary = 1;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, iomap_off, pemap_off;
	const u64 *prop64;
	const u32 *prop32;
	u64 phb_id;
	void *aux;
	long rc;

	pr_info(" Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug(" PHB-ID : 0x%016llx\n", phb_id);

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (phb) {
		memset(phb, 0, sizeof(struct pnv_phb));
		phb->hose = hose = pcibios_alloc_controller(np);
	}
	if (!phb || !phb->hose) {
		pr_err("PCI: Failed to allocate PCI controller for %s\n",
		       np->full_name);
		return;
	}

	spin_lock_init(&phb->lock);
	/* XXX Use device-tree */
	hose->first_busno = 0;
	hose->last_busno = 0xff;
	hose->private_data = phb;
	phb->opal_id = phb_id;
	phb->type = ioda_type;

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,p8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(phb->hose, np, primary);
	primary = 0;

	/* Get registers */
	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err(" Failed to map registers !\n");

	/* Initialize more IODA stuff */
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (!prop32)
		phb->ioda.total_pe = 1;
	else
		phb->ioda.total_pe = *prop32;

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already carved the top 64K off the M32 space (MSI space) */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start -
		hose->pci_mem_offset;
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Allocate aux data & arrays
	 *
	 * XXX TODO: Don't allocate io segmap on PHB3
	 */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	m32map_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
	iomap_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
	pemap_off = size;
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = alloc_bootmem(size);
	memset(aux, 0, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(0, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
	INIT_LIST_HEAD(&phb->ioda.pe_list);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;

	/* Clear unusable m64 */
	hose->mem_resources[1].flags = 0;
	hose->mem_resources[1].start = 0;
	hose->mem_resources[1].end = 0;
	hose->mem_resources[2].flags = 0;
	hose->mem_resources[2].start = 0;
	hose->mem_resources[2].end = 0;

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info(" %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe,
		phb->ioda.m32_size, phb->ioda.m32_segsize,
		phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. The expectation
	 * is that the PCI core will do correct I/O and MMIO alignment
	 * for the P2P bridge BARs so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
	ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * On IODA1 map everything to PE#0, on IODA2 we assume the IODA reset
	 * has cleared the RTT which has the same effect
	 */
	if (ioda_type == PNV_PHB_IODA1)
		opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
}

void pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, PNV_PHB_IODA2);
}

void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const u64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, PNV_PHB_IODA1);
	}
}