/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debug.h>

#include "powernv.h"
#include "pci.h"

#define define_pe_printk_level(func, kern_level) \
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) \
{ \
	struct va_format vaf; \
	va_list args; \
	char pfix[32]; \
	int r; \
 \
	va_start(args, fmt); \
 \
	vaf.fmt = fmt; \
	vaf.va = &args; \
 \
	if (pe->pdev) \
		strlcpy(pfix, dev_name(&pe->pdev->dev), \
			sizeof(pfix)); \
	else \
		sprintf(pfix, "%04x:%02x ", \
			pci_domain_nr(pe->pbus), \
			pe->pbus->number); \
	r = printk(kern_level "pci %s: [PE# %.3d] %pV", \
		   pfix, pe->pe_number, &vaf); \
 \
	va_end(args); \
 \
	return r; \
} \

define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);

/*
 * stdcix is only supposed to be used in hypervisor real mode as per
 * the architecture spec
 */
static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
{
	__asm__ __volatile__("stdcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

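	/*
	 * Scan the PE allocation bitmap for a free PE number and claim it
	 * with test_and_set_bit(), so concurrent callers racing for the
	 * same bit simply retry the search.
	 */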
	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while(test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}

/* Currently these two are only used when MSIs are enabled. This will
 * change, but in the meantime we need to protect them to avoid warnings.
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */

static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

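		/*
		 * Map the number of subordinate buses onto an OPAL bus
		 * compare mode: the more buses the PE spans, the fewer
		 * bus-number bits get compared when matching an RID to
		 * this PE. Only powers of two are handled; anything else
		 * falls back to an exact (single bus) match below.
		 */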
		switch(count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			pr_err("%s: Number of subordinate busses %d"
			       " unsupported\n",
			       pci_name(pe->pbus->self), count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate the PE in the PELT. We also need to add the PE to the
	 * corresponding PELT-V; otherwise an error originating from the PE
	 * might be attributed to other PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %d adding self to PELTV\n", rc);
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Add to all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}
	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVE on IODA1 */
	if (phb->type == PNV_PHB_IODA1) {
		pe->mve_number = pe->pe_number;
		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
				      pe->pe_number);
		if (rc) {
			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		} else {
			rc = opal_pci_set_mve_enable(phb->opal_id,
						     pe->mve_number, OPAL_ENABLE_MVE);
			if (rc) {
				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
				       rc, pe->mve_number);
				pe->mve_number = -1;
			}
		}
	} else if (phb->type == PNV_PHB_IODA2)
		pe->mve_number = 0;

	return 0;
}

static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

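	/*
	 * Keep the PHB's DMA PE list sorted by descending DMA weight:
	 * insert the new PE in front of the first entry lighter than it.
	 */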
	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}

static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}

#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We only get one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure; both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyway.
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */

static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There are two types of PCI-bus-sensitive PEs: one comprises a single
 * PCI bus, the other contains the primary PCI bus together with its
 * subordinate PCI devices and buses. The second type of PE normally
 * originates from a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num;

	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
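	/* RID is the bus number in the upper byte; devfn is 0 for a bus PE */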
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put the PE on the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA-capable device exists
	 * below the bridge
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}

static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}

/*
 * Configure PEs so that the downstream PCI buses and devices get their
 * associated PE#. Unfortunately, we haven't figured out how to identify
 * PLX bridges yet, so we simply put the PCI bus and everything subordinate
 * to the root port into a PE# here. This is expected to change as soon as
 * we can detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_PEs(hose->bus);
	}
}

static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * This function can be called before the PE# has been
	 * assigned. Do nothing in that case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}

static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
				     struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	uint64_t top;
	bool bypass = false;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return -ENODEV;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		bypass = (dma_mask >= top);
	}

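	/*
	 * Bypass is only used when the device's DMA mask covers the whole
	 * bypass window (bypass base plus the top of system memory), i.e.
	 * the device can address any RAM location directly.
	 */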
	if (bypass) {
		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
		set_dma_ops(&pdev->dev, &dma_direct_ops);
		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
	} else {
		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
		set_dma_ops(&pdev->dev, &dma_iommu_ops);
		set_iommu_table_base(&pdev->dev, &pe->tce32_table);
	}
	*pdev->dev.dma_mask = dma_mask;
	return 0;
}

static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
				   struct pci_bus *bus,
				   bool add_to_iommu_group)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (add_to_iommu_group)
			set_iommu_table_base_and_group(&dev->dev,
						       &pe->tce32_table);
		else
			set_iommu_table_base(&dev->dev, &pe->tce32_table);

		if (dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate,
					       add_to_iommu_group);
	}
}

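/*
 * TCE invalidation: after TCE entries are added or removed, the PHB's
 * "TCE kill" register must be written so that stale cached translations
 * are discarded. The register is reached either through the ioremap'ed
 * address stashed in it_index, or through its physical address when
 * called in real mode (rm).
 */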
static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 __be64 *startp, __be64 *endp, bool rm)
{
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;
	const unsigned shift = tbl->it_page_shift;

	start = __pa(startp);
	end = __pa(endp);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		start <<= shift;
		end <<= shift;
		inc = 128ull << shift;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);
		inc = 16;
	} else {
		/* Default (older HW) */
		inc = 128;
	}

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}

static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 __be64 *startp, __be64 *endp, bool rm)
{
	unsigned long start, end, inc;
	__be64 __iomem *invalidate = rm ?
		(__be64 __iomem *)pe->tce_inval_reg_phys :
		(__be64 __iomem *)tbl->it_index;
	const unsigned shift = tbl->it_page_shift;

	/* We'll invalidate DMA address in PE scope */
	start = 0x2ull << 60;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
	start |= (inc << shift);
	inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
	end |= (inc << shift);
	inc = (0x1ull << shift);
	mb();

	while (start <= end) {
		if (rm)
			__raw_rm_writeq(cpu_to_be64(start), invalidate);
		else
			__raw_writeq(cpu_to_be64(start), invalidate);
		start += inc;
	}
}

void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
				 __be64 *startp, __be64 *endp, bool rm)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1)
		pnv_pci_ioda1_tce_invalidate(pe, tbl, startp, endp, rm);
	else
		pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm);
}

static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
				      struct pnv_ioda_pe *pe, unsigned int base,
				      unsigned int segs)
{
	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int i;
	int64_t rc;
	void *addr;

	/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
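	/* That is 0x10000000 / 0x1000 = 65536 TCEs of 8 bytes = 512KB per segment */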
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						pe->pe_number,
						base + i, 1,
						__pa(addr) + TCE32_TABLE_SIZE * i,
						TCE32_TABLE_SIZE, 0x1000);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table,"
			       " err %ld\n", rc);
			goto fail;
		}
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
				  base << 28, IOMMU_PAGE_SHIFT_4K);

	/* OPAL variant of P7IOC SW invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
		tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
						       8);
		tbl->it_type |= (TCE_PCI_SWINV_CREATE |
				 TCE_PCI_SWINV_FREE |
				 TCE_PCI_SWINV_PAIR);
	}
	iommu_init_table(tbl, phb->hose->node);
	iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);

	if (pe->pdev)
		set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);

	return;
 fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}

static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	uint16_t window_id = (pe->pe_number << 1) + 1;
	int64_t rc;

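	/*
	 * Window 0 of the PE (even TVE index) holds the 32-bit TCE table;
	 * window 1 (odd index, selected by PCI address bit 59) is the
	 * bypass window configured here.
	 */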
	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
	if (enable) {
		phys_addr_t top = memblock_end_of_DRAM();

		top = roundup_pow_of_two(top);
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     top);
	} else {
		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
						     pe->pe_number,
						     window_id,
						     pe->tce_bypass_base,
						     0);

		/*
		 * EEH needs the mapping between IOMMU table and group
		 * of those VFIO/KVM pass-through devices. We can postpone
		 * resetting DMA ops until the DMA mask is configured on
		 * the host side.
		 */
		if (pe->pdev)
			set_iommu_table_base(&pe->pdev->dev, tbl);
		else
			pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
	}
	if (rc)
		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
	else
		pe->tce_bypass_enabled = enable;
}

static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
					  struct pnv_ioda_pe *pe)
{
	/* TVE #1 is selected by PCI address bit 59 */
	pe->tce_bypass_base = 1ull << 59;

	/* Install set_bypass callback for VFIO */
	pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;

	/* Enable bypass by default */
	pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
}

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	void *addr;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int tce_table_size, end;
	int64_t rc;

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* The PE will reserve all possible 32-bit space */
	pe->tce32_seg = 0;
	end = (1 << ilog2(phb->ioda.m32_pci_base));
	tce_table_size = (end / 0x1000) * 8;
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		end);

	/* Allocate TCE table */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce_table_size));
	if (!tce_mem) {
		pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce_table_size);

	/*
	 * Map the TCE table through the TVT. The TVE index is the PE
	 * number shifted by 1 bit for the 32-bit DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
					pe->pe_number << 1, 1, __pa(addr),
					tce_table_size, 0x1000);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table,"
		       " err %ld\n", rc);
		goto fail;
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0,
				  IOMMU_PAGE_SHIFT_4K);

	/* OPAL variant of PHB3 invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
		tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
						       8);
		tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
	}
	iommu_init_table(tbl, phb->hose->node);
	iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);

	if (pe->pdev)
		set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);

	/* Also create a bypass window */
	pnv_pci_ioda2_setup_bypass_pe(phb, pe);
	return;
fail:
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce_table_size));
}

static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE# than segments available, hand out one
	 * per PE until we run out and let the rest fail. If not,
	 * then we assign at least one segment per PE, plus more based
	 * on the amount of devices under that PE
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments, hand them
	 * out one base segment plus any residual segments based on
	 * weight
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
	base = 0;
	list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
		if (!pe->dma_weight)
			continue;
		if (!remaining) {
			pe_warn(pe, "No DMA32 resources available\n");
			continue;
		}
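		/*
		 * Every DMA-capable PE gets one base segment; the residual
		 * segments are then shared out proportionally to DMA weight,
		 * with the + tw/2 term providing round-to-nearest.
		 */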
		segs = 1;
		if (residual) {
			segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
			if (segs > remaining)
				segs = remaining;
		}

		/*
		 * For an IODA2-compliant PHB3 we needn't care about the
		 * weight: all of the available 32-bit DMA space is assigned
		 * to the PE.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
				pe->dma_weight, segs);
			pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
		} else {
			pe_info(pe, "Assign DMA32 space\n");
			segs = 0;
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
		}

		remaining -= segs;
		base += segs;
	}
}

#ifdef CONFIG_PCI_MSI
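/*
 * On IODA2 (PHB3) each MSI is acknowledged to OPAL via opal_pci_msi_eoi()
 * in addition to the usual XICS EOI, so the MSI setup code installs a
 * private irq_chip whose irq_eoi hook points here.
 */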
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);
	int64_t rc;

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}

static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct irq_data *idata;
	struct irq_chip *ichip;
	unsigned int xive_num = hwirq - phb->msi_base;
	__be32 data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
	if (pdn && pdn->force_32bit_msi)
		is_64 = 0;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		__be64 addr64;

		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = be64_to_cpu(addr64) >> 32;
		msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
	} else {
		__be32 addr32;

		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = be32_to_cpu(addr32);
	}
	msg->data = be32_to_cpu(data);

	/*
	 * Change the IRQ chip for the MSI interrupts on PHB3.
	 * The corresponding IRQ chip should be populated for
	 * the first time.
	 */
	if (phb->type == PNV_PHB_IODA2) {
		if (!phb->ioda.irq_chip_init) {
			idata = irq_get_irq_data(virq);
			ichip = irq_data_get_irq_chip(idata);
			phb->ioda.irq_chip_init = 1;
			phb->ioda.irq_chip = *ichip;
			phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
		}

		irq_set_chip(virq, &phb->ioda.irq_chip);
	}

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}

static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */

/*
 * This function is supposed to be called on a per-PE basis, from top to
 * bottom, so the I/O or MMIO segment assigned to a parent PE can be
 * overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
				  struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus_region region;
	struct resource *res;
	int i, index;
	int rc;

	/*
	 * NOTE: We only care about PCI-bus-based PEs for now. PCI-device-
	 * based PEs, for example SR-IOV sensitive VFs, will have to be
	 * figured out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	pci_bus_for_each_resource(pe->pbus, res, i) {
		if (!res || !res->flags ||
		    res->start > res->end)
			continue;

		if (res->flags & IORESOURCE_IO) {
			region.start = res->start - phb->ioda.io_pci_base;
			region.end   = res->end - phb->ioda.io_pci_base;
			index = region.start / phb->ioda.io_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.io_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping IO "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.io_segsize;
				index++;
			}
		} else if (res->flags & IORESOURCE_MEM) {
			/* WARNING: Assumes M32 is mem region 0 in PHB. We need to
			 * harden that algorithm when we start supporting M64
			 */
			region.start = res->start -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			region.end   = res->end -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			index = region.start / phb->ioda.m32_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.m32_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping M32 "
					       "segment#%d to PE#%d",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.m32_segsize;
				index++;
			}
		}
	}
}

static void pnv_pci_ioda_setup_seg(void)
{
	struct pci_controller *tmp, *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			pnv_ioda_setup_pe_seg(hose, pe);
		}
	}
}

static void pnv_pci_ioda_setup_DMA(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_dma(hose->private_data);

		/* Mark the PHB initialization done */
		phb = hose->private_data;
		phb->initialized = 1;
	}
}

static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
		if (!phb->dbgfs)
			pr_warning("%s: Error on creating debugfs on PHB#%x\n",
				   __func__, hose->global_number);
	}
#endif /* CONFIG_DEBUG_FS */
}

static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_seg();
	pnv_pci_ioda_setup_DMA();

	pnv_pci_ioda_create_dbgfs();

#ifdef CONFIG_EEH
	eeh_probe_mode_set(EEH_PROBE_MODE_DEV);
	eeh_init();
	eeh_addr_cache_build();
#endif
}

/*
 * Returns the alignment for I/O or memory windows for P2P bridges.
 * That actually depends on how PEs are segmented. For now, we return
 * the I/O or M32 segment size for PE-sensitive P2P bridges. Otherwise,
 * the default values (4KiB for I/O, 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was created
 * against the parent PCI bridge. In that case we needn't enlarge the
 * alignment, which saves some resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/* We need to support the prefetchable memory window later */
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}

/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static int pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* This function may be called before the PEs have been
	 * created, for example during resource reassignment in the
	 * PCI probe period. Just skip the check if the PEs aren't
	 * ready yet.
	 */
	if (!phb->initialized)
		return 0;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return -EINVAL;

	return 0;
}

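/*
 * RID -> PE lookup: the reverse map was populated for every RID covered
 * by the PE in pnv_ioda_configure_pe().
 */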
static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
			       u32 devfn)
{
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}

static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
{
	opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
		       OPAL_ASSERT_RESET);
}

void __init pnv_pci_init_ioda_phb(struct device_node *np,
				  u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, pemap_off, iomap_off = 0;
	const __be64 *prop64;
	const __be32 *prop32;
	int len;
	u64 phb_id;
	void *aux;
	long rc;

	pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (!phb) {
		pr_err("  Out of memory !\n");
		return;
	}

	/* Allocate PCI controller */
	memset(phb, 0, sizeof(struct pnv_phb));
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %s\n",
		       np->full_name);
		free_bootmem((unsigned long)phb, sizeof(struct pnv_phb));
		return;
	}

	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = be32_to_cpu(prop32[0]);
		hose->last_busno = be32_to_cpu(prop32[1]);
	} else {
		pr_warn("  Broken <bus-range> on %s\n", np->full_name);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err("  Failed to map registers !\n");

	/* Initialize more IODA stuff */
	phb->ioda.total_pe = 1;
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (prop32)
		phb->ioda.total_pe = be32_to_cpup(prop32);
	prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
	if (prop32)
		phb->ioda.reserved_pe = be32_to_cpup(prop32);
	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already taken off the top 64K of M32 space (MSI space) */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
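	/*
	 * One bootmem block holds, in order: the PE allocation bitmap, the
	 * M32 segment map, the IO segment map (IODA1 only) and the PE array.
	 */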
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	m32map_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = alloc_bootmem(size);
	memset(aux, 0, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	if (phb->type == PNV_PHB_IODA1)
		phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(phb->ioda.reserved_pe, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
	INIT_LIST_HEAD(&phb->ioda.pe_list);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;

	/* Clear unusable m64 */
	hose->mem_resources[1].flags = 0;
	hose->mem_resources[1].start = 0;
	hose->mem_resources[1].end   = 0;
	hose->mem_resources[2].flags = 0;
	hose->mem_resources[2].start = 0;
	hose->mem_resources[2].end   = 0;

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %d (%d) PE's M32: 0x%x [segment=0x%x]"
		" IO: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe,
		phb->ioda.reserved_pe,
		phb->ioda.m32_size, phb->ioda.m32_segsize,
		phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;
#ifdef CONFIG_EEH
	phb->eeh_ops = &ioda_eeh_ops;
#endif

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
	phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;

	/* Setup shutdown function for kexec */
	phb->shutdown = pnv_pci_ioda_shutdown;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. The PCI core
	 * is expected to do correct I/O and MMIO alignment for the
	 * P2P bridge BARs, so that each PCI bus (excluding the child
	 * P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
	ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
	ppc_md.pcibios_reset_secondary_bus = pnv_pci_reset_secondary_bus;
	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);

	/* If we're running in a kdump kernel, the previous kernel never
	 * shut down PCI devices correctly. We already got the IODA table
	 * cleaned out, so we have to issue a PHB reset to stop all PCI
	 * transactions from the previous kernel.
	 */
	if (is_kdump_kernel()) {
		pr_info("  Issue PHB reset ...\n");
		ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
		ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET);
	}
}

void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}

void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}