/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"

/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)
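
/*
 * Arithmetic check: 0x10000000 / 0x1000 = 65536 TCEs to map 256M of
 * 4K pages; at 8 bytes each that is a 512K table per 32-bit window.
 */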

static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

	printk("%spci %s: [PE# %.3d] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

#define pe_err(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__)
#define pe_warn(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__)
#define pe_info(pe, fmt, ...)					\
	pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)

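/*
 * Illustrative output only: pe_info(pe, "Configured\n") on a bus PE
 * prints something like "pci 0001:02 : [PE# 005] Configured".
 */
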
static bool pnv_iommu_bypass_disabled __read_mostly;

static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);

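/*
 * Usage sketch: booting with "iommu=nobypass" on the kernel command line
 * keeps all devices behind the 32-bit TCE window; other "iommu="
 * sub-options are skipped over by the comma scan above.
 */
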
/*
 * stdcix is only supposed to be used in hypervisor real mode as per
 * the architecture spec
 */
static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
{
	__asm__ __volatile__("stdcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
{
	return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
		(IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
}

static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe)) {
		pr_warn("%s: Invalid PE %d on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) {
		pr_warn("%s: PE %d was assigned on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;
}

static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}

/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/* Mark the M64 BAR assigned */
	set_bit(phb->ioda.m64_bar_idx, &phb->ioda.m64_bar_alloc);

	/*
	 * Strip off the segment used by the reserved PE, which is
	 * expected to be 0 or the last PE number.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe == 0)
		r->start += phb->ioda.m64_segsize;
	else if (phb->ioda.reserved_pe == (phb->ioda.total_pe - 1))
		r->end -= phb->ioda.m64_segsize;
	else
		pr_warn(" Cannot strip M64 segment for reserved PE#%d\n",
			phb->ioda.reserved_pe);

	return 0;

fail:
	pr_warn(" Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}

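/*
 * Note (an assumption based on the OPAL_ENABLE_M64_SPLIT call above): the
 * single default M64 BAR is split by firmware into total_pe equal segments
 * of m64_segsize bytes, so segment n of the window belongs to PE# n.
 */
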
static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb)
{
	resource_size_t sgsz = phb->ioda.m64_segsize;
	struct pci_dev *pdev;
	struct resource *r;
	int base, step, i;

	/*
	 * The root bus always claims the full M64 range; the root
	 * ports carry the ranges actually in use, so check the root
	 * ports rather than the root bus.
	 */
	list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
			r = &pdev->resource[PCI_BRIDGE_RESOURCES + i];
			if (!r->parent ||
			    !pnv_pci_is_mem_pref_64(r->flags))
				continue;

			base = (r->start - phb->ioda.m64_base) / sgsz;
			for (step = 0; step < resource_size(r) / sgsz; step++)
				pnv_ioda_reserve_pe(phb, base + step);
		}
	}
}

static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
				 struct pci_bus *bus, int all)
{
	resource_size_t segsz = phb->ioda.m64_segsize;
	struct pci_dev *pdev;
	struct resource *r;
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	bool found;
	int start, i, j;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return IODA_INVALID_PE;

	/* We support only one M64 window on each bus */
	found = false;
	pci_bus_for_each_resource(bus, r, i) {
		if (r && r->parent &&
		    pnv_pci_is_mem_pref_64(r->flags)) {
			found = true;
			break;
		}
	}

	/* No M64 window found ? */
	if (!found)
		return IODA_INVALID_PE;

	/* Allocate bitmap */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n",
			__func__);
		return IODA_INVALID_PE;
	}

	/*
	 * Figure out the PE numbers reserved by the PE and
	 * its child PEs.
	 */
	start = (r->start - phb->ioda.m64_base) / segsz;
	for (i = 0; i < resource_size(r) / segsz; i++)
		set_bit(start + i, pe_alloc);

	if (all)
		goto done;

	/*
	 * If the PE doesn't cover all subordinate buses,
	 * we need to subtract the PEs reserved for the children.
	 */
	list_for_each_entry(pdev, &bus->devices, bus_list) {
		if (!pdev->subordinate)
			continue;

		pci_bus_for_each_resource(pdev->subordinate, r, i) {
			if (!r || !r->parent ||
			    !pnv_pci_is_mem_pref_64(r->flags))
				continue;

			start = (r->start - phb->ioda.m64_base) / segsz;
			for (j = 0; j < resource_size(r) / segsz; j++)
				clear_bit(start + j, pe_alloc);
		}
	}

	/*
	 * The current bus might not own an M64 window itself: the window
	 * may be contributed entirely by its child buses. In that case
	 * there is no M64-dependent PE# to pick.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe)) {
		kfree(pe_alloc);
		return IODA_INVALID_PE;
	}

	/*
	 * Figure out the master PE and put all slave PEs on the master
	 * PE's list to form a compound PE.
	 */
done:
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
		phb->ioda.total_pe) {
		pe = &phb->ioda.pe_array[i];

		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}
	}

	kfree(pe_alloc);
	return master_pe->pe_number;
}

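/*
 * Illustration of pnv_ioda2_pick_m64_pe() with hypothetical numbers: if a
 * bus's prefetchable 64-bit window covers M64 segments 4..7, bits 4..7 get
 * set, PE#4 becomes the master, PEs 5..7 are chained onto its slaves list,
 * and 4 is returned as the compound PE number.
 */
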
static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	const u32 *r;
	u64 pci_addr;

	/* FIXME: Support M64 for P7IOC */
	if (phb->type != PNV_PHB_IODA2) {
		pr_info(" M64 window not supported\n");
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
		pr_info(" Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info(" No <ibm,opal-m64-window> on %s\n",
			dn->full_name);
		return;
	}

	res = &hose->mem_resources[1];
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe;
	phb->ioda.m64_base = pci_addr;

	pr_info(" MEM64 0x%016llx..0x%016llx -> 0x%016llx\n",
		res->start, res->end, pci_addr);

	/* Use last M64 BAR to cover M64 window */
	phb->ioda.m64_bar_idx = 15;
	phb->init_m64 = pnv_ioda2_init_m64;
	phb->reserve_m64_pe = pnv_ioda2_reserve_m64_pe;
	phb->pick_m64_pe = pnv_ioda2_pick_m64_pe;
}

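/*
 * For reference, pnv_ioda_parse_m64_window() above assumes the
 * <ibm,opal-m64-window> property is six cells: a 64-bit PCI address (r),
 * a 64-bit parent address translated via of_translate_address() (r + 2),
 * and a 64-bit window size (r + 4).
 */
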
static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
			return;

		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}

static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       slave->pe_number,
					       opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}

static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate, state;
	__be16 pcierr;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; the PE instance might not be
	 * initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * priority: a numerically larger freeze state wins.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}

/*
 * Currently these two are only used when MSIs are enabled; this will
 * change, but in the meantime we need to protect them to avoid warnings.
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */

static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
				  bool is_add)
{
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;
	long rc;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
			rc, desc);
		return -ENXIO;
	}

	if (!(child->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
				rc, desc);
			return -ENXIO;
		}
	}

	return 0;
}

static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
			      bool is_add)
{
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;
	int ret;

	/*
	 * Clear the PE frozen state. If it's a master PE, we need
	 * to clear the slave PEs' frozen state as well.
	 */
	if (is_add) {
		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (pe->flags & PNV_IODA_PE_MASTER) {
			list_for_each_entry(slave, &pe->slaves, list)
				opal_pci_eeh_freeze_clear(phb->opal_id,
							  slave->pe_number,
							  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		}
	}

	/*
	 * Associate the PE in PELT. We also need to add the PE to the
	 * corresponding PELT-V; otherwise, an error originating from
	 * the PE might be propagated to other PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
	if (ret)
		return ret;

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
			if (ret)
				return ret;
		}
	}

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev->bus->self;
#endif /* CONFIG_PCI_IOV */
	while (pdev) {
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
			if (ret)
				return ret;
		}

		pdev = pdev->bus->self;
	}

	return 0;
}

#ifdef CONFIG_PCI_IOV
static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	int64_t rc;
	long rid_end, rid;

	/* Currently, we only deconfigure VF PEs. Bus PEs will always be there. */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case 1: bcomp = OpalPciBusAll; break;
		case 2: bcomp = OpalPciBus7Bits; break;
		case 4: bcomp = OpalPciBus6Bits; break;
		case 8: bcomp = OpalPciBus5Bits; break;
		case 16: bcomp = OpalPciBus4Bits; break;
		case 32: bcomp = OpalPciBus3Bits; break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Clear the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = 0;

	/* Release from all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
				OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate the PE in PELT */
	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
	if (rc)
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);

	pe->pbus = NULL;
	pe->pdev = NULL;
	pe->parent_dev = NULL;

	return 0;
}
#endif /* CONFIG_PCI_IOV */

static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case 1: bcomp = OpalPciBusAll; break;
		case 2: bcomp = OpalPciBus7Bits; break;
		case 4: bcomp = OpalPciBus6Bits; break;
		case 8: bcomp = OpalPciBus5Bits; break;
		case 16: bcomp = OpalPciBus4Bits; break;
		case 32: bcomp = OpalPciBus3Bits; break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif /* CONFIG_PCI_IOV */
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate the PE in PELT. We also need to add the PE to the
	 * corresponding PELT-V; otherwise, an error originating from
	 * the PE might be propagated to other PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	/* Configure PELTV */
	pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Set up one MVE on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {
		pe->mve_number = 0;
		goto out;
	}

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %d\n",
		       rc, pe->mve_number);
		pe->mve_number = -1;
	} else {
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
		if (rc) {
			pe_err(pe, "OPAL error %ld enabling MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		}
	}

out:
	return 0;
}

static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}

static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/*
	 * This is quite simplistic. The "base" weight of a device
	 * is 10; 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
	struct pci_dn *pdn = pci_get_pdn(dev);
	int i;
	struct resource *res, res2;
	resource_size_t size;
	u16 num_vfs;

	if (!dev->is_physfn)
		return -EINVAL;

	/*
	 * "offset" is in VFs. The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number. Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
	num_vfs = pdn->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		if (!pnv_pci_is_mem_pref_64(res->flags))
			continue;

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the actual size for num_vfs VFs' BARs. This check is to
		 * make sure that after shifting, the range will not overlap
		 * with another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);
			return -EBUSY;
		}
	}

	/*
	 * After doing so, there will be a "hole" in /proc/iomem when
	 * offset is a positive value. It looks as if the device returned
	 * some MMIO range back to the system, which actually no one
	 * can use.
	 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		if (!pnv_pci_is_mem_pref_64(res->flags))
			continue;

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2 = *res;
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
			 i, &res2, res, num_vfs, offset);
		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
	}
	return 0;
}
#endif /* CONFIG_PCI_IOV */

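/*
 * Worked example for pnv_pci_vf_resource_shift(), hypothetical numbers:
 * with a 1MB per-VF IOV BAR and offset = 16, every VF BAR moves up by
 * 16MB, so VF n's BAR lands in M64 segment (and hence PE#) n + 16.
 */
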
#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */

static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There are two types of PCI-bus-sensitive PEs: one comprising a
 * single PCI bus, and another containing the primary PCI bus plus its
 * subordinate PCI devices and buses. The second type of PE normally
 * originates from a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num = IODA_INVALID_PE;

	/* Check if the PE is determined by M64 */
	if (phb->pick_m64_pe)
		pe_num = phb->pick_m64_pe(phb, bus, all);

	/* The PE number isn't pinned by M64 */
	if (pe_num == IODA_INVALID_PE)
		pe_num = pnv_ioda_alloc_pe(phb);

	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			__func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put the PE on the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/*
	 * Account for one DMA PE if at least one DMA-capable device
	 * exists below the bridge.
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}

static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}

/*
 * Configure PEs so that the downstream PCI buses and devices
 * get their associated PE#. Unfortunately, we haven't figured
 * out a way to identify PLX bridges yet, so we simply put the
 * PCI bus and the subordinates behind the root port into a PE#
 * here. This rule is expected to change as soon as we can
 * detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		/* M64 layout might affect PE allocation */
		if (phb->reserve_m64_pe)
			phb->reserve_m64_pe(phb);

		pnv_ioda_setup_PEs(hose->bus);
	}
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_release_m64(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int i, j;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < M64_PER_IOV; j++) {
			if (pdn->m64_wins[i][j] == IODA_INVALID_M64)
				continue;
			opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 0);
			clear_bit(pdn->m64_wins[i][j], &phb->ioda.m64_bar_alloc);
			pdn->m64_wins[i][j] = IODA_INVALID_M64;
		}

	return 0;
}

static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	unsigned int win;
	struct resource *res;
	int i, j;
	int64_t rc;
	int total_vfs;
	resource_size_t size, start;
	int pe_num;
	int vf_groups;
	int vf_per_group;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	total_vfs = pci_sriov_get_totalvfs(pdev);

	/* Initialize the m64_wins to IODA_INVALID_M64 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < M64_PER_IOV; j++)
			pdn->m64_wins[i][j] = IODA_INVALID_M64;

	if (pdn->m64_per_iov == M64_PER_IOV) {
		vf_groups = (num_vfs <= M64_PER_IOV) ? num_vfs : M64_PER_IOV;
		vf_per_group = (num_vfs <= M64_PER_IOV) ? 1 :
			roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
	} else {
		vf_groups = 1;
		vf_per_group = 1;
	}

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		if (!pnv_pci_is_mem_pref_64(res->flags))
			continue;

		for (j = 0; j < vf_groups; j++) {
			do {
				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
						phb->ioda.m64_bar_idx + 1, 0);

				if (win >= phb->ioda.m64_bar_idx + 1)
					goto m64_failed;
			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

			pdn->m64_wins[i][j] = win;

			if (pdn->m64_per_iov == M64_PER_IOV) {
				size = pci_iov_resource_size(pdev,
							PCI_IOV_RESOURCES + i);
				size = size * vf_per_group;
				start = res->start + size * j;
			} else {
				size = resource_size(res);
				start = res->start;
			}

			/* Map the M64 here */
			if (pdn->m64_per_iov == M64_PER_IOV) {
				pe_num = pdn->offset + j;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
						pe_num, OPAL_M64_WINDOW_TYPE,
						pdn->m64_wins[i][j], 0);
			}

			rc = opal_pci_set_phb_mem_window(phb->opal_id,
						 OPAL_M64_WINDOW_TYPE,
						 pdn->m64_wins[i][j],
						 start,
						 0, /* unused */
						 size);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
					win, rc);
				goto m64_failed;
			}

			if (pdn->m64_per_iov == M64_PER_IOV)
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 2);
			else
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
				     OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 1);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
					win, rc);
				goto m64_failed;
			}
		}
	}
	return 0;

m64_failed:
	pnv_pci_vf_release_m64(pdev);
	return -EBUSY;
}

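/*
 * The grouping arithmetic above, illustrated with hypothetical numbers
 * (assuming M64_PER_IOV is 4 as defined elsewhere): enabling 6 VFs gives
 * vf_groups = 4 and vf_per_group = roundup_pow_of_two(6) / 4 = 2, i.e.
 * each M64 BAR maps the slice of the IOV BAR belonging to two VFs.
 */
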
static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct iommu_table *tbl;
	unsigned long addr;
	int64_t rc;
	struct iommu_table_group *table_group;

	bus = dev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	tbl = pe->table_group.tables[0];
	addr = tbl->it_base;

	opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
				   pe->pe_number << 1, 1, __pa(addr),
				   0, 0x1000);

	rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
					     pe->pe_number,
					     (pe->pe_number << 1) + 1,
					     pe->tce_bypass_base,
					     0);
	if (rc)
		pe_warn(pe, "OPAL error %ld release DMA window\n", rc);

	table_group = tbl->it_table_group;
	if (table_group->group) {
		iommu_group_put(table_group->group);
		BUG_ON(table_group->group);
	}
	iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
	free_pages(addr, get_order(TCE32_TABLE_SIZE));
	pe->table_group.tables[0] = NULL;
}

static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe, *pe_n;
	struct pci_dn *pdn;
	u16 vf_index;
	int64_t rc;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
		int vf_group;
		int vf_per_group;
		int vf_index1;

		vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;

		for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++)
			for (vf_index = vf_group * vf_per_group;
			     vf_index < (vf_group + 1) * vf_per_group &&
			     vf_index < num_vfs;
			     vf_index++)
				for (vf_index1 = vf_group * vf_per_group;
				     vf_index1 < (vf_group + 1) * vf_per_group &&
				     vf_index1 < num_vfs;
				     vf_index1++) {

					rc = opal_pci_set_peltv(phb->opal_id,
						pdn->offset + vf_index,
						pdn->offset + vf_index1,
						OPAL_REMOVE_PE_FROM_DOMAIN);

					if (rc)
						dev_warn(&pdev->dev, "%s: Failed to unlink same group PE#%d(%lld)\n",
							__func__,
							pdn->offset + vf_index1, rc);
				}
	}

	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
		if (pe->parent_dev != pdev)
			continue;

		pnv_pci_ioda2_release_dma_pe(pdev, pe);

		/* Remove from list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_del(&pe->list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_ioda_deconfigure_pe(phb, pe);

		pnv_ioda_free_pe(phb, pe->pe_number);
	}
}

void pnv_pci_sriov_disable(struct pci_dev *pdev)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pci_sriov *iov;
	u16 num_vfs;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	iov = pdev->sriov;
	num_vfs = pdn->num_vfs;

	/* Release VF PEs */
	pnv_ioda_release_vf_PE(pdev, num_vfs);

	if (phb->type == PNV_PHB_IODA2) {
		if (pdn->m64_per_iov == 1)
			pnv_pci_vf_resource_shift(pdev, -pdn->offset);

		/* Release M64 windows */
		pnv_pci_vf_release_m64(pdev);

		/* Release PE numbers */
		bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
		pdn->offset = 0;
	}
}

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe);
static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	int pe_num;
	u16 vf_index;
	struct pci_dn *pdn;
	int64_t rc;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (!pdev->is_physfn)
		return;

	/* Reserve PE for each VF */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		pe_num = pdn->offset + vf_index;

		pe = &phb->ioda.pe_array[pe_num];
		pe->pe_number = pe_num;
		pe->phb = phb;
		pe->flags = PNV_IODA_PE_VF;
		pe->pbus = NULL;
		pe->parent_dev = pdev;
		pe->tce32_seg = -1;
		pe->mve_number = -1;
		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
			   pci_iov_virtfn_devfn(pdev, vf_index);

		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
			hose->global_number, pdev->bus->number,
			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);

		if (pnv_ioda_configure_pe(phb, pe)) {
			/* XXX What do we do here ? */
			if (pe_num)
				pnv_ioda_free_pe(phb, pe_num);
			pe->pdev = NULL;
			continue;
		}

		/* Put the PE on the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_pci_ioda2_setup_dma_pe(phb, pe);
	}

	if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
		int vf_group;
		int vf_per_group;
		int vf_index1;

		vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;

		for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++) {
			for (vf_index = vf_group * vf_per_group;
			     vf_index < (vf_group + 1) * vf_per_group &&
			     vf_index < num_vfs;
			     vf_index++) {
				for (vf_index1 = vf_group * vf_per_group;
				     vf_index1 < (vf_group + 1) * vf_per_group &&
				     vf_index1 < num_vfs;
				     vf_index1++) {

					rc = opal_pci_set_peltv(phb->opal_id,
						pdn->offset + vf_index,
						pdn->offset + vf_index1,
						OPAL_ADD_PE_TO_DOMAIN);

					if (rc)
						dev_warn(&pdev->dev, "%s: Failed to link same group PE#%d(%lld)\n",
							__func__,
							pdn->offset + vf_index1, rc);
				}
			}
		}
	}
}

int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int ret;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (phb->type == PNV_PHB_IODA2) {
		/* Calculate available PEs for the required VFs */
		mutex_lock(&phb->ioda.pe_alloc_mutex);
		pdn->offset = bitmap_find_next_zero_area(
			phb->ioda.pe_alloc, phb->ioda.total_pe,
			0, num_vfs, 0);
		if (pdn->offset >= phb->ioda.total_pe) {
			mutex_unlock(&phb->ioda.pe_alloc_mutex);
			dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
			pdn->offset = 0;
			return -EBUSY;
		}
		bitmap_set(phb->ioda.pe_alloc, pdn->offset, num_vfs);
		pdn->num_vfs = num_vfs;
		mutex_unlock(&phb->ioda.pe_alloc_mutex);

		/* Assign M64 window accordingly */
		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
		if (ret) {
			dev_info(&pdev->dev, "Not enough M64 window resources\n");
			goto m64_failed;
		}

		/*
		 * When using one M64 BAR to map one IOV BAR, we need to shift
		 * the IOV BAR according to the PE# allocated to the VFs.
		 * Otherwise, the PE# for the VF will conflict with others.
		 */
		if (pdn->m64_per_iov == 1) {
			ret = pnv_pci_vf_resource_shift(pdev, pdn->offset);
			if (ret)
				goto m64_failed;
		}
	}

	/* Setup VF PEs */
	pnv_ioda_setup_vf_PE(pdev, num_vfs);

	return 0;

m64_failed:
	bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
	pdn->offset = 0;

	return ret;
}

int pcibios_sriov_disable(struct pci_dev *pdev)
{
	pnv_pci_sriov_disable(pdev);

	/* Release PCI data */
	remove_dev_pci_data(pdev);
	return 0;
}

int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* Allocate PCI data */
	add_dev_pci_data(pdev);

	pnv_pci_sriov_enable(pdev, num_vfs);
	return 0;
}
#endif /* CONFIG_PCI_IOV */

static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called before the PE# has been
	 * assigned. Do nothing in that case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
	/*
	 * Note: iommu_add_device() would fail here, as
	 * for a physical PE: the device is already added by now;
	 * for a virtual PE: sysfs entries are not ready yet and
	 * tce_iommu_bus_notifier will add the device to a group later.
	 */
}

static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;
	uint64_t top;
	bool bypass = false;

	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return -ENODEV;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	if (pe->tce_bypass_enabled) {
		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
		bypass = (dma_mask >= top);
	}

	if (bypass) {
		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
		set_dma_ops(&pdev->dev, &dma_direct_ops);
		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
	} else {
		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
		set_dma_ops(&pdev->dev, &dma_iommu_ops);
		set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
	}
	*pdev->dev.dma_mask = dma_mask;
	return 0;
}

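/*
 * Usage sketch (assuming this is wired up as the PHB's dma_set_mask
 * hook): a driver calling dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))
 * lands here; if the mask covers the top of the bypass window it gets
 * direct DMA ops, while a 32-bit mask keeps it on the TCE table.
 */
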
Gavin Shanfe7e85c2014-09-30 12:39:10 +10001640static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb,
1641 struct pci_dev *pdev)
1642{
1643 struct pci_dn *pdn = pci_get_pdn(pdev);
1644 struct pnv_ioda_pe *pe;
1645 u64 end, mask;
1646
1647 if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
1648 return 0;
1649
1650 pe = &phb->ioda.pe_array[pdn->pe_number];
1651 if (!pe->tce_bypass_enabled)
1652 return __dma_get_required_mask(&pdev->dev);
1653
1654
1655 end = pe->tce_bypass_base + memblock_end_of_DRAM();
1656 mask = 1ULL << (fls64(end) - 1);
1657 mask += mask - 1;
1658
1659 return mask;
1660}
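
/*
 * Worked example for the mask computation above (illustrative values):
 * with tce_bypass_base = 1ull << 59 and memblock_end_of_DRAM() = 8GB,
 *
 *   end = 0x0800000200000000
 *   fls64(end) = 60, so mask = 1ull << 59 = 0x0800000000000000
 *   mask += mask - 1  =>  0x0fffffffffffffff
 *
 * i.e. the smallest all-ones mask that covers the whole bypass window.
 */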
1661
Gavin Shandff4a392014-07-15 17:00:55 +10001662static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
Alexey Kardashevskiyea30e992015-06-05 16:34:53 +10001663 struct pci_bus *bus)
Benjamin Herrenschmidt74251fe2013-07-01 17:54:09 +10001664{
1665 struct pci_dev *dev;
1666
1667 list_for_each_entry(dev, &bus->devices, bus_list) {
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001668 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
Alexey Kardashevskiy46170822015-06-05 16:34:54 +10001669 iommu_add_device(&dev->dev);
Gavin Shandff4a392014-07-15 17:00:55 +10001670
Benjamin Herrenschmidt74251fe2013-07-01 17:54:09 +10001671 if (dev->subordinate)
Alexey Kardashevskiyea30e992015-06-05 16:34:53 +10001672 pnv_ioda_setup_bus_dma(pe, dev->subordinate);
Benjamin Herrenschmidt74251fe2013-07-01 17:54:09 +10001673 }
1674}
1675
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001676static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
1677 unsigned long index, unsigned long npages, bool rm)
Gavin Shan4cce9552013-04-25 19:21:00 +00001678{
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001679 struct pnv_ioda_pe *pe = container_of(tbl->it_table_group,
1680 struct pnv_ioda_pe, table_group);
Benjamin Herrenschmidt3ad26e52013-10-11 18:23:53 +11001681 __be64 __iomem *invalidate = rm ?
1682 (__be64 __iomem *)pe->tce_inval_reg_phys :
1683 (__be64 __iomem *)tbl->it_index;
Gavin Shan4cce9552013-04-25 19:21:00 +00001684 unsigned long start, end, inc;
Alexey Kardashevskiyb0376c92014-06-06 18:44:01 +10001685 const unsigned shift = tbl->it_page_shift;
Gavin Shan4cce9552013-04-25 19:21:00 +00001686
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001687 start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
1688 end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
1689 npages - 1);
Gavin Shan4cce9552013-04-25 19:21:00 +00001690
1691 /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
1692 if (tbl->it_busno) {
Alexey Kardashevskiyb0376c92014-06-06 18:44:01 +10001693 start <<= shift;
1694 end <<= shift;
1695 inc = 128ull << shift;
Gavin Shan4cce9552013-04-25 19:21:00 +00001696 start |= tbl->it_busno;
1697 end |= tbl->it_busno;
1698 } else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
1699 /* p7ioc-style invalidation, 2 TCEs per write */
1700 start |= (1ull << 63);
1701 end |= (1ull << 63);
1702 inc = 16;
1703 } else {
1704 /* Default (older HW) */
1705 inc = 128;
1706 }
1707
1708	end |= inc - 1;	/* round up end to be different from start */
1709
1710 mb(); /* Ensure above stores are visible */
1711 while (start <= end) {
Alexey Kardashevskiy8e0a1612013-08-28 18:37:43 +10001712 if (rm)
Benjamin Herrenschmidt3ad26e52013-10-11 18:23:53 +11001713 __raw_rm_writeq(cpu_to_be64(start), invalidate);
Alexey Kardashevskiy8e0a1612013-08-28 18:37:43 +10001714 else
Benjamin Herrenschmidt3ad26e52013-10-11 18:23:53 +11001715 __raw_writeq(cpu_to_be64(start), invalidate);
Gavin Shan4cce9552013-04-25 19:21:00 +00001716 start += inc;
1717 }
1718
1719 /*
1720 * The iommu layer will do another mb() for us on build()
1721 * and we don't care on free()
1722 */
1723}
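
/*
 * Illustrative numbers for the p7ioc-style (TCE_PCI_SWINV_PAIR) path
 * above: with it_offset = 0, index = 4 and npages = 2,
 *
 *   start = __pa(it_base) + 4 * 8,  end = start + 8
 *
 * both get bit 63 set, end is rounded up with inc - 1 = 15, and the
 * loop issues a single 16-byte-granular store that kills the aligned
 * TCE pair (4, 5).
 */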
1724
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001725static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
1726 long npages, unsigned long uaddr,
1727 enum dma_data_direction direction,
1728 struct dma_attrs *attrs)
1729{
1730 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
1731 attrs);
1732
1733 if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
1734 pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
1735
1736 return ret;
1737}
1738
1739static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
1740 long npages)
1741{
1742 pnv_tce_free(tbl, index, npages);
1743
1744 if (tbl->it_type & TCE_PCI_SWINV_FREE)
1745 pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
1746}
1747
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10001748static struct iommu_table_ops pnv_ioda1_iommu_ops = {
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001749 .set = pnv_ioda1_tce_build,
1750 .clear = pnv_ioda1_tce_free,
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10001751 .get = pnv_tce_get,
1752};
1753
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001754static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
1755 unsigned long index, unsigned long npages, bool rm)
Gavin Shan4cce9552013-04-25 19:21:00 +00001756{
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001757 struct pnv_ioda_pe *pe = container_of(tbl->it_table_group,
1758 struct pnv_ioda_pe, table_group);
Gavin Shan4cce9552013-04-25 19:21:00 +00001759 unsigned long start, end, inc;
Benjamin Herrenschmidt3ad26e52013-10-11 18:23:53 +11001760 __be64 __iomem *invalidate = rm ?
1761 (__be64 __iomem *)pe->tce_inval_reg_phys :
1762 (__be64 __iomem *)tbl->it_index;
Alexey Kardashevskiyb0376c92014-06-06 18:44:01 +10001763 const unsigned shift = tbl->it_page_shift;
Gavin Shan4cce9552013-04-25 19:21:00 +00001764
1765	/* We'll invalidate DMA addresses in PE scope */
Alexey Kardashevskiyb0376c92014-06-06 18:44:01 +10001766 start = 0x2ull << 60;
Gavin Shan4cce9552013-04-25 19:21:00 +00001767 start |= (pe->pe_number & 0xFF);
1768 end = start;
1769
1770 /* Figure out the start, end and step */
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001771 start |= (index << shift);
1772 end |= ((index + npages - 1) << shift);
Alexey Kardashevskiyb0376c92014-06-06 18:44:01 +10001773 inc = (0x1ull << shift);
Gavin Shan4cce9552013-04-25 19:21:00 +00001774 mb();
1775
1776 while (start <= end) {
Alexey Kardashevskiy8e0a1612013-08-28 18:37:43 +10001777 if (rm)
Benjamin Herrenschmidt3ad26e52013-10-11 18:23:53 +11001778 __raw_rm_writeq(cpu_to_be64(start), invalidate);
Alexey Kardashevskiy8e0a1612013-08-28 18:37:43 +10001779 else
Benjamin Herrenschmidt3ad26e52013-10-11 18:23:53 +11001780 __raw_writeq(cpu_to_be64(start), invalidate);
Gavin Shan4cce9552013-04-25 19:21:00 +00001781 start += inc;
1782 }
1783}
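
/*
 * Illustrative example for the IODA2/PHB3 invalidation above: for
 * PE# 5, page shift 12, index = 0x100 and npages = 2,
 *
 *   start = (0x2ull << 60) | 5 | (0x100 << 12)
 *   end   = (0x2ull << 60) | 5 | (0x101 << 12)
 *
 * and the loop writes two doublewords (inc = 0x1000), one kill per
 * 4K IOMMU page.
 */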
1784
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001785static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
1786 long npages, unsigned long uaddr,
1787 enum dma_data_direction direction,
1788 struct dma_attrs *attrs)
Gavin Shan4cce9552013-04-25 19:21:00 +00001789{
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001790 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
1791 attrs);
Gavin Shan4cce9552013-04-25 19:21:00 +00001792
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001793 if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
1794 pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
1795
1796 return ret;
1797}
1798
1799static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
1800 long npages)
1801{
1802 pnv_tce_free(tbl, index, npages);
1803
1804 if (tbl->it_type & TCE_PCI_SWINV_FREE)
1805 pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
Gavin Shan4cce9552013-04-25 19:21:00 +00001806}
1807
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10001808static struct iommu_table_ops pnv_ioda2_iommu_ops = {
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001809 .set = pnv_ioda2_tce_build,
1810 .clear = pnv_ioda2_tce_free,
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10001811 .get = pnv_tce_get,
1812};
1813
Greg Kroah-Hartmancad5cef2012-12-21 14:04:10 -08001814static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
1815 struct pnv_ioda_pe *pe, unsigned int base,
1816 unsigned int segs)
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001817{
1818
1819 struct page *tce_mem = NULL;
1820 const __be64 *swinvp;
1821 struct iommu_table *tbl;
1822 unsigned int i;
1823 int64_t rc;
1824 void *addr;
1825
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001826 /* XXX FIXME: Handle 64-bit only DMA devices */
1827 /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
1828 /* XXX FIXME: Allocate multi-level tables on PHB3 */
1829
1830 /* We shouldn't already have a 32-bit DMA associated */
1831 if (WARN_ON(pe->tce32_seg >= 0))
1832 return;
1833
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001834 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
1835 phb->hose->node);
1836 tbl->it_table_group = &pe->table_group;
1837 pe->table_group.tables[0] = tbl;
1838 iommu_register_group(&pe->table_group, phb->hose->global_number,
1839 pe->pe_number);
Alexey Kardashevskiyc5773822015-06-05 16:34:55 +10001840
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001841 /* Grab a 32-bit TCE table */
1842 pe->tce32_seg = base;
1843 pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
1844 (base << 28), ((base + segs) << 28) - 1);
1845
1846 /* XXX Currently, we allocate one big contiguous table for the
1847 * TCEs. We only really need one chunk per 256M of TCE space
1848	 * (ie per segment) but that's an optimization for later; it
1849 * requires some added smarts with our get/put_tce implementation
1850 */
1851 tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
1852 get_order(TCE32_TABLE_SIZE * segs));
1853 if (!tce_mem) {
1854 pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
1855 goto fail;
1856 }
1857 addr = page_address(tce_mem);
1858 memset(addr, 0, TCE32_TABLE_SIZE * segs);
1859
1860 /* Configure HW */
1861 for (i = 0; i < segs; i++) {
1862 rc = opal_pci_map_pe_dma_window(phb->opal_id,
1863 pe->pe_number,
1864 base + i, 1,
1865 __pa(addr) + TCE32_TABLE_SIZE * i,
1866 TCE32_TABLE_SIZE, 0x1000);
1867 if (rc) {
1868 pe_err(pe, " Failed to configure 32-bit TCE table,"
1869 " err %ld\n", rc);
1870 goto fail;
1871 }
1872 }
1873
1874 /* Setup linux iommu table */
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001875 pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
Alexey Kardashevskiy8fa5d452014-06-06 18:44:03 +10001876 base << 28, IOMMU_PAGE_SHIFT_4K);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001877
1878 /* OPAL variant of P7IOC SW invalidated TCEs */
1879 swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
1880 if (swinvp) {
1881		/* We need a couple more fields -- an address and a data
1882		 * value to OR in. Since the bus is only printed out on table free
1883 * errors, and on the first pass the data will be a relative
1884 * bus number, print that out instead.
1885 */
Alexey Kardashevskiy8e0a1612013-08-28 18:37:43 +10001886 pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
1887 tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
1888 8);
Gavin Shan65fd7662014-04-24 18:00:28 +10001889 tbl->it_type |= (TCE_PCI_SWINV_CREATE |
1890 TCE_PCI_SWINV_FREE |
1891 TCE_PCI_SWINV_PAIR);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001892 }
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10001893 tbl->it_ops = &pnv_ioda1_iommu_ops;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001894 iommu_init_table(tbl, phb->hose->node);
1895
Wei Yang781a8682015-03-25 16:23:57 +08001896 if (pe->flags & PNV_IODA_PE_DEV) {
Alexey Kardashevskiy46170822015-06-05 16:34:54 +10001897 /*
1898 * Setting table base here only for carrying iommu_group
1899 * further down to let iommu_add_device() do the job.
1900 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
1901 */
1902 set_iommu_table_base(&pe->pdev->dev, tbl);
1903 iommu_add_device(&pe->pdev->dev);
Alexey Kardashevskiyc5773822015-06-05 16:34:55 +10001904 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
Alexey Kardashevskiyea30e992015-06-05 16:34:53 +10001905 pnv_ioda_setup_bus_dma(pe, pe->pbus);
Benjamin Herrenschmidt74251fe2013-07-01 17:54:09 +10001906
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001907 return;
1908 fail:
1909 /* XXX Failure: Try to fallback to 64-bit only ? */
1910 if (pe->tce32_seg >= 0)
1911 pe->tce32_seg = -1;
1912 if (tce_mem)
1913 __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
1914}
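
/*
 * Sizing sketch for the IODA1 window above (illustrative): each 256MB
 * segment needs 0x10000000 / 0x1000 = 65536 TCEs of 8 bytes, i.e.
 * TCE32_TABLE_SIZE = 512KB. A PE handed 4 segments therefore maps
 * 1GB of 32-bit DMA space from a 2MB physically contiguous table.
 */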
1915
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001916static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
1917{
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001918 struct pnv_ioda_pe *pe = container_of(tbl->it_table_group,
1919 struct pnv_ioda_pe, table_group);
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001920 uint16_t window_id = (pe->pe_number << 1 ) + 1;
1921 int64_t rc;
1922
1923 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
1924 if (enable) {
1925 phys_addr_t top = memblock_end_of_DRAM();
1926
1927 top = roundup_pow_of_two(top);
1928 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1929 pe->pe_number,
1930 window_id,
1931 pe->tce_bypass_base,
1932 top);
1933 } else {
1934 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1935 pe->pe_number,
1936 window_id,
1937 pe->tce_bypass_base,
1938 0);
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001939 }
1940 if (rc)
1941 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
1942 else
1943 pe->tce_bypass_enabled = enable;
1944}
1945
1946static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
1947 struct pnv_ioda_pe *pe)
1948{
1949 /* TVE #1 is selected by PCI address bit 59 */
1950 pe->tce_bypass_base = 1ull << 59;
1951
1952 /* Install set_bypass callback for VFIO */
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001953 pe->table_group.tables[0]->set_bypass = pnv_pci_ioda2_set_bypass;
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001954
1955 /* Enable bypass by default */
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001956 pnv_pci_ioda2_set_bypass(pe->table_group.tables[0], true);
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001957}
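
/*
 * Note on the constant above (illustrative): with tce_bypass_base =
 * 1ull << 59, any DMA address with PCI address bit 59 set selects
 * TVE #1, so the bypass bus address is simply 1ull << 59 plus the
 * host physical address; pnv_pci_ioda_dma_set_mask() installs exactly
 * that value via set_dma_offset() when bypass is enabled.
 */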
1958
Gavin Shan373f5652013-04-25 19:21:01 +00001959static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
1960 struct pnv_ioda_pe *pe)
1961{
1962 struct page *tce_mem = NULL;
1963 void *addr;
1964 const __be64 *swinvp;
1965 struct iommu_table *tbl;
1966 unsigned int tce_table_size, end;
1967 int64_t rc;
1968
1969 /* We shouldn't already have a 32-bit DMA associated */
1970 if (WARN_ON(pe->tce32_seg >= 0))
1971 return;
1972
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001973 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
1974 phb->hose->node);
1975 tbl->it_table_group = &pe->table_group;
1976 pe->table_group.tables[0] = tbl;
1977 iommu_register_group(&pe->table_group, phb->hose->global_number,
1978 pe->pe_number);
Alexey Kardashevskiyc5773822015-06-05 16:34:55 +10001979
Gavin Shan373f5652013-04-25 19:21:01 +00001980	/* The PE will reserve all possible 32-bit space */
1981 pe->tce32_seg = 0;
1982 end = (1 << ilog2(phb->ioda.m32_pci_base));
1983 tce_table_size = (end / 0x1000) * 8;
1984 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
1985 end);
1986
1987 /* Allocate TCE table */
1988 tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
1989 get_order(tce_table_size));
1990 if (!tce_mem) {
1991 pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
1992 goto fail;
1993 }
1994 addr = page_address(tce_mem);
1995 memset(addr, 0, tce_table_size);
1996
1997 /*
1998 * Map TCE table through TVT. The TVE index is the PE number
1999	 * shifted by 1 bit for 32-bit DMA space.
2000 */
2001 rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
2002 pe->pe_number << 1, 1, __pa(addr),
2003 tce_table_size, 0x1000);
2004 if (rc) {
2005 pe_err(pe, "Failed to configure 32-bit TCE table,"
2006 " err %ld\n", rc);
2007 goto fail;
2008 }
2009
2010 /* Setup linux iommu table */
Alexey Kardashevskiy8fa5d452014-06-06 18:44:03 +10002011 pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0,
2012 IOMMU_PAGE_SHIFT_4K);
Gavin Shan373f5652013-04-25 19:21:01 +00002013
2014 /* OPAL variant of PHB3 invalidated TCEs */
2015 swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
2016 if (swinvp) {
2017		/* We need a couple more fields -- an address and a data
2018		 * value to OR in. Since the bus is only printed out on table free
2019 * errors, and on the first pass the data will be a relative
2020 * bus number, print that out instead.
2021 */
Alexey Kardashevskiy8e0a1612013-08-28 18:37:43 +10002022 pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
2023 tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
2024 8);
Gavin Shan65fd7662014-04-24 18:00:28 +10002025 tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
Gavin Shan373f5652013-04-25 19:21:01 +00002026 }
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10002027 tbl->it_ops = &pnv_ioda2_iommu_ops;
Gavin Shan373f5652013-04-25 19:21:01 +00002028 iommu_init_table(tbl, phb->hose->node);
2029
Wei Yang781a8682015-03-25 16:23:57 +08002030 if (pe->flags & PNV_IODA_PE_DEV) {
Alexey Kardashevskiy46170822015-06-05 16:34:54 +10002031 /*
2032 * Setting table base here only for carrying iommu_group
2033 * further down to let iommu_add_device() do the job.
2034 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
2035 */
2036 set_iommu_table_base(&pe->pdev->dev, tbl);
2037 iommu_add_device(&pe->pdev->dev);
Alexey Kardashevskiyc5773822015-06-05 16:34:55 +10002038 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
Alexey Kardashevskiyea30e992015-06-05 16:34:53 +10002039 pnv_ioda_setup_bus_dma(pe, pe->pbus);
Benjamin Herrenschmidt74251fe2013-07-01 17:54:09 +10002040
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11002041 /* Also create a bypass window */
Thadeu Lima de Souza Cascardo4e287842014-10-23 19:19:35 -02002042 if (!pnv_iommu_bypass_disabled)
2043 pnv_pci_ioda2_setup_bypass_pe(phb, pe);
2044
Gavin Shan373f5652013-04-25 19:21:01 +00002045 return;
2046fail:
2047 if (pe->tce32_seg >= 0)
2048 pe->tce32_seg = -1;
2049 if (tce_mem)
2050 __free_pages(tce_mem, get_order(tce_table_size));
2051}
2052
Greg Kroah-Hartmancad5cef2012-12-21 14:04:10 -08002053static void pnv_ioda_setup_dma(struct pnv_phb *phb)
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002054{
2055 struct pci_controller *hose = phb->hose;
2056 unsigned int residual, remaining, segs, tw, base;
2057 struct pnv_ioda_pe *pe;
2058
2059 /* If we have more PE# than segments available, hand out one
2060 * per PE until we run out and let the rest fail. If not,
2061 * then we assign at least one segment per PE, plus more based
2062	 * on the number of devices under that PE.
2063 */
2064 if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
2065 residual = 0;
2066 else
2067 residual = phb->ioda.tce32_count -
2068 phb->ioda.dma_pe_count;
2069
2070 pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
2071 hose->global_number, phb->ioda.tce32_count);
2072 pr_info("PCI: %d PE# for a total weight of %d\n",
2073 phb->ioda.dma_pe_count, phb->ioda.dma_weight);
2074
2075 /* Walk our PE list and configure their DMA segments, hand them
2076 * out one base segment plus any residual segments based on
2077 * weight
2078 */
2079 remaining = phb->ioda.tce32_count;
2080 tw = phb->ioda.dma_weight;
2081 base = 0;
Gavin Shan7ebdf952012-08-20 03:49:15 +00002082 list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002083 if (!pe->dma_weight)
2084 continue;
2085 if (!remaining) {
2086 pe_warn(pe, "No DMA32 resources available\n");
2087 continue;
2088 }
2089 segs = 1;
2090 if (residual) {
2091 segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
2092 if (segs > remaining)
2093 segs = remaining;
2094 }
Gavin Shan373f5652013-04-25 19:21:01 +00002095
2096 /*
2097 * For IODA2 compliant PHB3, we needn't care about the weight.
2098		 * All of the available 32-bit DMA space will be assigned to
2099 * the specific PE.
2100 */
2101 if (phb->type == PNV_PHB_IODA1) {
2102 pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
2103 pe->dma_weight, segs);
2104 pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
2105 } else {
2106 pe_info(pe, "Assign DMA32 space\n");
2107 segs = 0;
2108 pnv_pci_ioda2_setup_dma_pe(phb, pe);
2109 }
2110
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002111 remaining -= segs;
2112 base += segs;
2113 }
2114}
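
/*
 * Distribution example for the loop above (illustrative numbers): with
 * tce32_count = 16 segments, dma_pe_count = 4 and total weight
 * tw = 20, residual = 12; a PE of weight 10 then receives
 *
 *   segs = 1 + (10 * 12 + 20 / 2) / 20 = 7
 *
 * of the 16 segments, the + tw/2 term rounding to nearest.
 */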
2115
2116#ifdef CONFIG_PCI_MSI
Gavin Shan137436c2013-04-25 19:20:59 +00002117static void pnv_ioda2_msi_eoi(struct irq_data *d)
2118{
2119 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
2120 struct irq_chip *chip = irq_data_get_irq_chip(d);
2121 struct pnv_phb *phb = container_of(chip, struct pnv_phb,
2122 ioda.irq_chip);
2123 int64_t rc;
2124
2125 rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
2126 WARN_ON_ONCE(rc);
2127
2128 icp_native_eoi(d);
2129}
2130
Ian Munsiefd9a1c22014-10-08 19:54:55 +11002131
2132static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
2133{
2134 struct irq_data *idata;
2135 struct irq_chip *ichip;
2136
2137 if (phb->type != PNV_PHB_IODA2)
2138 return;
2139
2140 if (!phb->ioda.irq_chip_init) {
2141 /*
2142		 * The first time we set up an MSI IRQ, we need to set up the
2143 * corresponding IRQ chip to route correctly.
2144 */
2145 idata = irq_get_irq_data(virq);
2146 ichip = irq_data_get_irq_chip(idata);
2147 phb->ioda.irq_chip_init = 1;
2148 phb->ioda.irq_chip = *ichip;
2149 phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
2150 }
2151 irq_set_chip(virq, &phb->ioda.irq_chip);
2152}
2153
Ian Munsie80c49c72014-10-08 19:54:57 +11002154#ifdef CONFIG_CXL_BASE
2155
Ryan Grimm6f963ec2015-01-28 20:16:04 -06002156struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
Ian Munsie80c49c72014-10-08 19:54:57 +11002157{
2158 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2159
Ryan Grimm6f963ec2015-01-28 20:16:04 -06002160 return of_node_get(hose->dn);
Ian Munsie80c49c72014-10-08 19:54:57 +11002161}
Ryan Grimm6f963ec2015-01-28 20:16:04 -06002162EXPORT_SYMBOL(pnv_pci_get_phb_node);
Ian Munsie80c49c72014-10-08 19:54:57 +11002163
Ryan Grimm1212aa12015-01-19 11:52:50 -06002164int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
Ian Munsie80c49c72014-10-08 19:54:57 +11002165{
2166 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2167 struct pnv_phb *phb = hose->private_data;
2168 struct pnv_ioda_pe *pe;
2169 int rc;
2170
2171 pe = pnv_ioda_get_pe(dev);
2172 if (!pe)
2173 return -ENODEV;
2174
2175 pe_info(pe, "Switching PHB to CXL\n");
2176
Ryan Grimm1212aa12015-01-19 11:52:50 -06002177 rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
Ian Munsie80c49c72014-10-08 19:54:57 +11002178 if (rc)
2179 dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
2180
2181 return rc;
2182}
Ryan Grimm1212aa12015-01-19 11:52:50 -06002183EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
Ian Munsie80c49c72014-10-08 19:54:57 +11002184
2185/* Find PHB for cxl dev and allocate MSI hwirqs.
2186 * Returns the absolute hardware IRQ number
2187 */
2188int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
2189{
2190 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2191 struct pnv_phb *phb = hose->private_data;
2192 int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
2193
2194 if (hwirq < 0) {
2195 dev_warn(&dev->dev, "Failed to find a free MSI\n");
2196 return -ENOSPC;
2197 }
2198
2199 return phb->msi_base + hwirq;
2200}
2201EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
2202
2203void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
2204{
2205 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2206 struct pnv_phb *phb = hose->private_data;
2207
2208 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
2209}
2210EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
2211
2212void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
2213 struct pci_dev *dev)
2214{
2215 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2216 struct pnv_phb *phb = hose->private_data;
2217 int i, hwirq;
2218
2219 for (i = 1; i < CXL_IRQ_RANGES; i++) {
2220 if (!irqs->range[i])
2221 continue;
2222 pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
2223 i, irqs->offset[i],
2224 irqs->range[i]);
2225 hwirq = irqs->offset[i] - phb->msi_base;
2226 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
2227 irqs->range[i]);
2228 }
2229}
2230EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
2231
2232int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
2233 struct pci_dev *dev, int num)
2234{
2235 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2236 struct pnv_phb *phb = hose->private_data;
2237 int i, hwirq, try;
2238
2239 memset(irqs, 0, sizeof(struct cxl_irq_ranges));
2240
2241 /* 0 is reserved for the multiplexed PSL DSI interrupt */
2242 for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
2243 try = num;
2244 while (try) {
2245 hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
2246 if (hwirq >= 0)
2247 break;
2248 try /= 2;
2249 }
2250 if (!try)
2251 goto fail;
2252
2253 irqs->offset[i] = phb->msi_base + hwirq;
2254 irqs->range[i] = try;
2255 pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
2256 i, irqs->offset[i], irqs->range[i]);
2257 num -= try;
2258 }
2259 if (num)
2260 goto fail;
2261
2262 return 0;
2263fail:
2264 pnv_cxl_release_hwirq_ranges(irqs, dev);
2265 return -ENOSPC;
2266}
2267EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
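
/*
 * Allocation sketch for the loop above (illustrative): asked for
 * num = 7 hwirqs on a fragmented bitmap, range 1 tries 7 and then
 * succeeds with 3, range 2 tries 4 and then succeeds with 2, and
 * range 3 takes the final 2; if num is still nonzero after the last
 * range, everything is released via pnv_cxl_release_hwirq_ranges()
 * and -ENOSPC is returned.
 */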
2268
2269int pnv_cxl_get_irq_count(struct pci_dev *dev)
2270{
2271 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2272 struct pnv_phb *phb = hose->private_data;
2273
2274 return phb->msi_bmp.irq_count;
2275}
2276EXPORT_SYMBOL(pnv_cxl_get_irq_count);
2277
2278int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
2279 unsigned int virq)
2280{
2281 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2282 struct pnv_phb *phb = hose->private_data;
2283 unsigned int xive_num = hwirq - phb->msi_base;
2284 struct pnv_ioda_pe *pe;
2285 int rc;
2286
2287 if (!(pe = pnv_ioda_get_pe(dev)))
2288 return -ENODEV;
2289
2290 /* Assign XIVE to PE */
2291 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2292 if (rc) {
2293 pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
2294 "hwirq 0x%x XIVE 0x%x PE\n",
2295 pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
2296 return -EIO;
2297 }
2298 set_msi_irq_chip(phb, virq);
2299
2300 return 0;
2301}
2302EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
2303#endif
2304
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002305static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
Gavin Shan137436c2013-04-25 19:20:59 +00002306 unsigned int hwirq, unsigned int virq,
2307 unsigned int is_64, struct msi_msg *msg)
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002308{
2309 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
2310 unsigned int xive_num = hwirq - phb->msi_base;
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002311 __be32 data;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002312 int rc;
2313
2314 /* No PE assigned ? bail out ... no MSI for you ! */
2315 if (pe == NULL)
2316 return -ENXIO;
2317
2318 /* Check if we have an MVE */
2319 if (pe->mve_number < 0)
2320 return -ENXIO;
2321
Benjamin Herrenschmidtb72c1f62013-05-21 22:58:21 +00002322 /* Force 32-bit MSI on some broken devices */
Benjamin Herrenschmidt36074382014-10-07 16:12:36 +11002323 if (dev->no_64bit_msi)
Benjamin Herrenschmidtb72c1f62013-05-21 22:58:21 +00002324 is_64 = 0;
2325
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002326 /* Assign XIVE to PE */
2327 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2328 if (rc) {
2329 pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
2330 pci_name(dev), rc, xive_num);
2331 return -EIO;
2332 }
2333
2334 if (is_64) {
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002335 __be64 addr64;
2336
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002337 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
2338 &addr64, &data);
2339 if (rc) {
2340 pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
2341 pci_name(dev), rc);
2342 return -EIO;
2343 }
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002344 msg->address_hi = be64_to_cpu(addr64) >> 32;
2345 msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002346 } else {
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002347 __be32 addr32;
2348
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002349 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
2350 &addr32, &data);
2351 if (rc) {
2352 pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
2353 pci_name(dev), rc);
2354 return -EIO;
2355 }
2356 msg->address_hi = 0;
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002357 msg->address_lo = be32_to_cpu(addr32);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002358 }
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002359 msg->data = be32_to_cpu(data);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002360
Ian Munsiefd9a1c22014-10-08 19:54:55 +11002361 set_msi_irq_chip(phb, virq);
Gavin Shan137436c2013-04-25 19:20:59 +00002362
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002363 pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
2364 " address=%x_%08x data=%x PE# %d\n",
2365 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
2366 msg->address_hi, msg->address_lo, data, pe->pe_number);
2367
2368 return 0;
2369}
2370
2371static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
2372{
Gavin Shanfb1b55d2013-03-05 21:12:37 +00002373 unsigned int count;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002374 const __be32 *prop = of_get_property(phb->hose->dn,
2375 "ibm,opal-msi-ranges", NULL);
2376 if (!prop) {
2377 /* BML Fallback */
2378 prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
2379 }
2380 if (!prop)
2381 return;
2382
2383 phb->msi_base = be32_to_cpup(prop);
Gavin Shanfb1b55d2013-03-05 21:12:37 +00002384 count = be32_to_cpup(prop + 1);
2385 if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002386 pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
2387 phb->hose->global_number);
2388 return;
2389 }
Gavin Shanfb1b55d2013-03-05 21:12:37 +00002390
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002391 phb->msi_setup = pnv_pci_ioda_msi_setup;
2392 phb->msi32_support = 1;
2393 pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
Gavin Shanfb1b55d2013-03-05 21:12:37 +00002394 count, phb->msi_base);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002395}
2396#else
2397static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
2398#endif /* CONFIG_PCI_MSI */
2399
Wei Yang6e628c72015-03-25 16:23:55 +08002400#ifdef CONFIG_PCI_IOV
2401static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
2402{
2403 struct pci_controller *hose;
2404 struct pnv_phb *phb;
2405 struct resource *res;
2406 int i;
2407 resource_size_t size;
2408 struct pci_dn *pdn;
Wei Yang5b88ec22015-03-25 16:23:58 +08002409 int mul, total_vfs;
Wei Yang6e628c72015-03-25 16:23:55 +08002410
2411 if (!pdev->is_physfn || pdev->is_added)
2412 return;
2413
2414 hose = pci_bus_to_host(pdev->bus);
2415 phb = hose->private_data;
2416
2417 pdn = pci_get_pdn(pdev);
2418 pdn->vfs_expanded = 0;
2419
Wei Yang5b88ec22015-03-25 16:23:58 +08002420 total_vfs = pci_sriov_get_totalvfs(pdev);
2421 pdn->m64_per_iov = 1;
2422 mul = phb->ioda.total_pe;
2423
2424 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2425 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2426 if (!res->flags || res->parent)
2427 continue;
2428 if (!pnv_pci_is_mem_pref_64(res->flags)) {
2429 dev_warn(&pdev->dev, " non M64 VF BAR%d: %pR\n",
2430 i, res);
2431 continue;
2432 }
2433
2434 size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
2435
2436 /* bigger than 64M */
2437 if (size > (1 << 26)) {
2438 dev_info(&pdev->dev, "PowerNV: VF BAR%d: %pR IOV size is bigger than 64M, roundup power2\n",
2439 i, res);
2440 pdn->m64_per_iov = M64_PER_IOV;
2441 mul = roundup_pow_of_two(total_vfs);
2442 break;
2443 }
2444 }
2445
Wei Yang6e628c72015-03-25 16:23:55 +08002446 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2447 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2448 if (!res->flags || res->parent)
2449 continue;
2450 if (!pnv_pci_is_mem_pref_64(res->flags)) {
2451 dev_warn(&pdev->dev, "Skipping expanding VF BAR%d: %pR\n",
2452 i, res);
2453 continue;
2454 }
2455
2456 dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
2457 size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
Wei Yang5b88ec22015-03-25 16:23:58 +08002458 res->end = res->start + size * mul - 1;
Wei Yang6e628c72015-03-25 16:23:55 +08002459 dev_dbg(&pdev->dev, " %pR\n", res);
2460 dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
Wei Yang5b88ec22015-03-25 16:23:58 +08002461 i, res, mul);
Wei Yang6e628c72015-03-25 16:23:55 +08002462 }
Wei Yang5b88ec22015-03-25 16:23:58 +08002463 pdn->vfs_expanded = mul;
Wei Yang6e628c72015-03-25 16:23:55 +08002464}
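
/*
 * Expansion example for the fixup above (illustrative): a 1MB VF BAR
 * on a PHB with total_pe = 256 is grown to 256MB so each potential VF
 * lands in its own M64 segment; a VF BAR larger than 64MB instead
 * sets m64_per_iov and is only multiplied by
 * roundup_pow_of_two(total_vfs).
 */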
2465#endif /* CONFIG_PCI_IOV */
2466
Gavin Shan11685be2012-08-20 03:49:16 +00002467/*
2468 * This function is supposed to be called on a per-PE basis, from
2469 * top to bottom, so the I/O or MMIO segment assigned to a
2470 * parent PE can be overridden by its child PEs if necessary.
2471 */
Greg Kroah-Hartmancad5cef2012-12-21 14:04:10 -08002472static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
2473 struct pnv_ioda_pe *pe)
Gavin Shan11685be2012-08-20 03:49:16 +00002474{
2475 struct pnv_phb *phb = hose->private_data;
2476 struct pci_bus_region region;
2477 struct resource *res;
2478 int i, index;
2479 int rc;
2480
2481 /*
2482	 * NOTE: We only care about PCI-bus-based PEs for now.
2483	 * PCI-device-based PEs, for example SR-IOV VFs, should
2484	 * be figured out later.
2485 */
2486 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
2487
2488 pci_bus_for_each_resource(pe->pbus, res, i) {
2489 if (!res || !res->flags ||
2490 res->start > res->end)
2491 continue;
2492
2493 if (res->flags & IORESOURCE_IO) {
2494 region.start = res->start - phb->ioda.io_pci_base;
2495 region.end = res->end - phb->ioda.io_pci_base;
2496 index = region.start / phb->ioda.io_segsize;
2497
2498 while (index < phb->ioda.total_pe &&
2499 region.start <= region.end) {
2500 phb->ioda.io_segmap[index] = pe->pe_number;
2501 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2502 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
2503 if (rc != OPAL_SUCCESS) {
2504 pr_err("%s: OPAL error %d when mapping IO "
2505 "segment #%d to PE#%d\n",
2506 __func__, rc, index, pe->pe_number);
2507 break;
2508 }
2509
2510 region.start += phb->ioda.io_segsize;
2511 index++;
2512 }
Gavin Shan027fa022015-03-27 11:29:00 +11002513 } else if ((res->flags & IORESOURCE_MEM) &&
2514 !pnv_pci_is_mem_pref_64(res->flags)) {
Gavin Shan11685be2012-08-20 03:49:16 +00002515 region.start = res->start -
Benjamin Herrenschmidt3fd47f02013-05-06 13:40:40 +10002516 hose->mem_offset[0] -
Gavin Shan11685be2012-08-20 03:49:16 +00002517 phb->ioda.m32_pci_base;
2518 region.end = res->end -
Benjamin Herrenschmidt3fd47f02013-05-06 13:40:40 +10002519 hose->mem_offset[0] -
Gavin Shan11685be2012-08-20 03:49:16 +00002520 phb->ioda.m32_pci_base;
2521 index = region.start / phb->ioda.m32_segsize;
2522
2523 while (index < phb->ioda.total_pe &&
2524 region.start <= region.end) {
2525 phb->ioda.m32_segmap[index] = pe->pe_number;
2526 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
2527 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
2528 if (rc != OPAL_SUCCESS) {
2529 pr_err("%s: OPAL error %d when mapping M32 "
2530 "segment#%d to PE#%d",
2531 __func__, rc, index, pe->pe_number);
2532 break;
2533 }
2534
2535 region.start += phb->ioda.m32_segsize;
2536 index++;
2537 }
2538 }
2539 }
2540}
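
/*
 * Mapping sketch for the function above (illustrative): with
 * m32_segsize = 128MB and a PE window spanning 256MB of M32 space,
 * index walks two consecutive segments and each
 * opal_pci_map_pe_mmio_window() call binds one segment to the PE#.
 */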
2541
Greg Kroah-Hartmancad5cef2012-12-21 14:04:10 -08002542static void pnv_pci_ioda_setup_seg(void)
Gavin Shan11685be2012-08-20 03:49:16 +00002543{
2544 struct pci_controller *tmp, *hose;
2545 struct pnv_phb *phb;
2546 struct pnv_ioda_pe *pe;
2547
2548 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2549 phb = hose->private_data;
2550 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
2551 pnv_ioda_setup_pe_seg(hose, pe);
2552 }
2553 }
2554}
2555
Greg Kroah-Hartmancad5cef2012-12-21 14:04:10 -08002556static void pnv_pci_ioda_setup_DMA(void)
Gavin Shan13395c42012-08-20 03:49:17 +00002557{
2558 struct pci_controller *hose, *tmp;
Gavin Shandb1266c2012-08-20 03:49:18 +00002559 struct pnv_phb *phb;
Gavin Shan13395c42012-08-20 03:49:17 +00002560
2561 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2562 pnv_ioda_setup_dma(hose->private_data);
Gavin Shandb1266c2012-08-20 03:49:18 +00002563
2564 /* Mark the PHB initialization done */
2565 phb = hose->private_data;
2566 phb->initialized = 1;
Gavin Shan13395c42012-08-20 03:49:17 +00002567 }
2568}
2569
Gavin Shan37c367f2013-06-20 18:13:25 +08002570static void pnv_pci_ioda_create_dbgfs(void)
2571{
2572#ifdef CONFIG_DEBUG_FS
2573 struct pci_controller *hose, *tmp;
2574 struct pnv_phb *phb;
2575 char name[16];
2576
2577 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2578 phb = hose->private_data;
2579
2580 sprintf(name, "PCI%04x", hose->global_number);
2581 phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
2582 if (!phb->dbgfs)
2583 pr_warning("%s: Error on creating debugfs on PHB#%x\n",
2584 __func__, hose->global_number);
2585 }
2586#endif /* CONFIG_DEBUG_FS */
2587}
2588
Greg Kroah-Hartmancad5cef2012-12-21 14:04:10 -08002589static void pnv_pci_ioda_fixup(void)
Gavin Shanfb446ad2012-08-20 03:49:14 +00002590{
2591 pnv_pci_ioda_setup_PEs();
Gavin Shan11685be2012-08-20 03:49:16 +00002592 pnv_pci_ioda_setup_seg();
Gavin Shan13395c42012-08-20 03:49:17 +00002593 pnv_pci_ioda_setup_DMA();
Gavin Shane9cc17d2013-06-20 13:21:14 +08002594
Gavin Shan37c367f2013-06-20 18:13:25 +08002595 pnv_pci_ioda_create_dbgfs();
2596
Gavin Shane9cc17d2013-06-20 13:21:14 +08002597#ifdef CONFIG_EEH
Gavin Shane9cc17d2013-06-20 13:21:14 +08002598 eeh_init();
Mike Qiudadcd6d2014-06-26 02:58:47 -04002599 eeh_addr_cache_build();
Gavin Shane9cc17d2013-06-20 13:21:14 +08002600#endif
Gavin Shanfb446ad2012-08-20 03:49:14 +00002601}
2602
Gavin Shan271fd032012-09-11 16:59:47 -06002603/*
2604 * Returns the alignment for I/O or memory windows for P2P
2605 * bridges. That actually depends on how PEs are segmented.
2606 * For now, we return I/O or M32 segment size for PE sensitive
2607 * P2P bridges. Otherwise, the default values (4KiB for I/O,
2608 * 1MiB for memory) will be returned.
2609 *
2610 * The current PCI bus might be put into one PE, which was
2611 * created against the parent PCI bridge. In that case, we
2612 * needn't enlarge the alignment so that we can save some
2613 * resources.
2614 */
2615static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
2616 unsigned long type)
2617{
2618 struct pci_dev *bridge;
2619 struct pci_controller *hose = pci_bus_to_host(bus);
2620 struct pnv_phb *phb = hose->private_data;
2621 int num_pci_bridges = 0;
2622
2623 bridge = bus->self;
2624 while (bridge) {
2625 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
2626 num_pci_bridges++;
2627 if (num_pci_bridges >= 2)
2628 return 1;
2629 }
2630
2631 bridge = bridge->bus->self;
2632 }
2633
Guo Chao262af552014-07-21 14:42:30 +10002634	/* We fall back to M32 if M64 isn't supported */
2635 if (phb->ioda.m64_segsize &&
2636 pnv_pci_is_mem_pref_64(type))
2637 return phb->ioda.m64_segsize;
Gavin Shan271fd032012-09-11 16:59:47 -06002638 if (type & IORESOURCE_MEM)
2639 return phb->ioda.m32_segsize;
2640
2641 return phb->ioda.io_segsize;
2642}
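
/*
 * Alignment example (illustrative): on a PHB with m32_segsize =
 * 128MB, a root-level P2P bridge gets its memory window aligned to
 * 128MB so the bus behind it can own whole M32 segments, while a bus
 * sitting behind a second PCIe-to-PCI bridge is folded into its
 * parent's PE, so the function returns 1 and the PCI core's default
 * alignment applies.
 */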
2643
Wei Yang5350ab32015-03-25 16:23:56 +08002644#ifdef CONFIG_PCI_IOV
2645static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
2646 int resno)
2647{
2648 struct pci_dn *pdn = pci_get_pdn(pdev);
2649 resource_size_t align, iov_align;
2650
2651 iov_align = resource_size(&pdev->resource[resno]);
2652 if (iov_align)
2653 return iov_align;
2654
2655 align = pci_iov_resource_size(pdev, resno);
2656 if (pdn->vfs_expanded)
2657 return pdn->vfs_expanded * align;
2658
2659 return align;
2660}
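
/*
 * Example (illustrative, taking the vfs_expanded path): with
 * vfs_expanded = 256 and a 1MB per-VF BAR, the IOV BAR alignment
 * becomes 256MB, matching the size expansion done in
 * pnv_pci_ioda_fixup_iov_resources() so the BAR remains M64-segment
 * aligned after resource reassignment.
 */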
2661#endif /* CONFIG_PCI_IOV */
2662
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002663/* Prevent enabling devices for which we couldn't properly
2664 * assign a PE
2665 */
Daniel Axtensc88c2a12015-03-31 16:00:41 +11002666static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002667{
Gavin Shandb1266c2012-08-20 03:49:18 +00002668 struct pci_controller *hose = pci_bus_to_host(dev->bus);
2669 struct pnv_phb *phb = hose->private_data;
2670 struct pci_dn *pdn;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002671
Gavin Shandb1266c2012-08-20 03:49:18 +00002672	/* The function is probably called while the PEs have
2673	 * not been created yet, for example during resource
2674	 * reassignment in the PCI probe period. We just skip the
2675	 * check if the PEs aren't ready.
2676 */
2677 if (!phb->initialized)
Daniel Axtensc88c2a12015-03-31 16:00:41 +11002678 return true;
Gavin Shandb1266c2012-08-20 03:49:18 +00002679
Benjamin Herrenschmidtb72c1f62013-05-21 22:58:21 +00002680 pdn = pci_get_pdn(dev);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002681 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
Daniel Axtensc88c2a12015-03-31 16:00:41 +11002682 return false;
Gavin Shandb1266c2012-08-20 03:49:18 +00002683
Daniel Axtensc88c2a12015-03-31 16:00:41 +11002684 return true;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002685}
2686
2687static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
2688 u32 devfn)
2689{
2690 return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
2691}
2692
Michael Neuling7a8e6bb2015-05-27 16:06:59 +10002693static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
Benjamin Herrenschmidt73ed1482013-05-10 16:59:18 +10002694{
Michael Neuling7a8e6bb2015-05-27 16:06:59 +10002695 struct pnv_phb *phb = hose->private_data;
2696
Gavin Shand1a85ee2014-09-30 12:39:05 +10002697 opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
Benjamin Herrenschmidt73ed1482013-05-10 16:59:18 +10002698 OPAL_ASSERT_RESET);
2699}
2700
Daniel Axtens92ae0352015-04-28 15:12:05 +10002701static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
2702 .dma_dev_setup = pnv_pci_dma_dev_setup,
2703#ifdef CONFIG_PCI_MSI
2704 .setup_msi_irqs = pnv_setup_msi_irqs,
2705 .teardown_msi_irqs = pnv_teardown_msi_irqs,
2706#endif
2707 .enable_device_hook = pnv_pci_enable_device_hook,
2708 .window_alignment = pnv_pci_window_alignment,
2709 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
Daniel Axtens763d2d82015-04-28 15:12:07 +10002710 .dma_set_mask = pnv_pci_ioda_dma_set_mask,
Michael Neuling7a8e6bb2015-05-27 16:06:59 +10002711 .shutdown = pnv_pci_ioda_shutdown,
Daniel Axtens92ae0352015-04-28 15:12:05 +10002712};
2713
Anton Blancharde51df2c2014-08-20 08:55:18 +10002714static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2715 u64 hub_id, int ioda_type)
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002716{
2717 struct pci_controller *hose;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002718 struct pnv_phb *phb;
Gavin Shan81846162013-12-26 09:29:40 +08002719 unsigned long size, m32map_off, pemap_off, iomap_off = 0;
Alistair Popplec681b932013-09-23 12:04:57 +10002720 const __be64 *prop64;
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002721 const __be32 *prop32;
Gavin Shanf1b7cc32013-07-31 16:47:01 +08002722 int len;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002723 u64 phb_id;
2724 void *aux;
2725 long rc;
2726
Gavin Shan58d714e2013-07-31 16:47:00 +08002727 pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002728
2729 prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
2730 if (!prop64) {
2731 pr_err(" Missing \"ibm,opal-phbid\" property !\n");
2732 return;
2733 }
2734 phb_id = be64_to_cpup(prop64);
2735 pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
2736
Michael Ellermane39f223f2014-11-18 16:47:35 +11002737 phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
Gavin Shan58d714e2013-07-31 16:47:00 +08002738
2739 /* Allocate PCI controller */
Gavin Shan58d714e2013-07-31 16:47:00 +08002740 phb->hose = hose = pcibios_alloc_controller(np);
2741 if (!phb->hose) {
2742 pr_err(" Can't allocate PCI controller for %s\n",
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002743 np->full_name);
Michael Ellermane39f223f2014-11-18 16:47:35 +11002744 memblock_free(__pa(phb), sizeof(struct pnv_phb));
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002745 return;
2746 }
2747
2748 spin_lock_init(&phb->lock);
Gavin Shanf1b7cc32013-07-31 16:47:01 +08002749 prop32 = of_get_property(np, "bus-range", &len);
2750 if (prop32 && len == 8) {
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002751 hose->first_busno = be32_to_cpu(prop32[0]);
2752 hose->last_busno = be32_to_cpu(prop32[1]);
Gavin Shanf1b7cc32013-07-31 16:47:01 +08002753 } else {
2754 pr_warn(" Broken <bus-range> on %s\n", np->full_name);
2755 hose->first_busno = 0;
2756 hose->last_busno = 0xff;
2757 }
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002758 hose->private_data = phb;
Gavin Shane9cc17d2013-06-20 13:21:14 +08002759 phb->hub_id = hub_id;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002760 phb->opal_id = phb_id;
Gavin Shanaa0c0332013-04-25 19:20:57 +00002761 phb->type = ioda_type;
Wei Yang781a8682015-03-25 16:23:57 +08002762 mutex_init(&phb->ioda.pe_alloc_mutex);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002763
Benjamin Herrenschmidtcee72d52011-11-29 18:22:53 +00002764 /* Detect specific models for error handling */
2765 if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
2766 phb->model = PNV_PHB_MODEL_P7IOC;
Benjamin Herrenschmidtf3d40c22013-05-04 14:24:32 +00002767 else if (of_device_is_compatible(np, "ibm,power8-pciex"))
Gavin Shanaa0c0332013-04-25 19:20:57 +00002768 phb->model = PNV_PHB_MODEL_PHB3;
Benjamin Herrenschmidtcee72d52011-11-29 18:22:53 +00002769 else
2770 phb->model = PNV_PHB_MODEL_UNKNOWN;
2771
Gavin Shanaa0c0332013-04-25 19:20:57 +00002772 /* Parse 32-bit and IO ranges (if any) */
Gavin Shan2f1ec022013-07-31 16:47:02 +08002773 pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002774
Gavin Shanaa0c0332013-04-25 19:20:57 +00002775 /* Get registers */
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002776 phb->regs = of_iomap(np, 0);
2777 if (phb->regs == NULL)
2778 pr_err(" Failed to map registers !\n");
2779
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002780 /* Initialize more IODA stuff */
Gavin Shan36954dc2013-11-04 16:32:47 +08002781 phb->ioda.total_pe = 1;
Gavin Shanaa0c0332013-04-25 19:20:57 +00002782 prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
Gavin Shan36954dc2013-11-04 16:32:47 +08002783 if (prop32)
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002784 phb->ioda.total_pe = be32_to_cpup(prop32);
Gavin Shan36954dc2013-11-04 16:32:47 +08002785 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
2786 if (prop32)
2787 phb->ioda.reserved_pe = be32_to_cpup(prop32);
Guo Chao262af552014-07-21 14:42:30 +10002788
2789 /* Parse 64-bit MMIO range */
2790 pnv_ioda_parse_m64_window(phb);
2791
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002792 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
Gavin Shanaa0c0332013-04-25 19:20:57 +00002793	/* FW has already carved off the top 64k of M32 space (MSI space) */
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002794 phb->ioda.m32_size += 0x10000;
2795
2796 phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
Benjamin Herrenschmidt3fd47f02013-05-06 13:40:40 +10002797 phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002798 phb->ioda.io_size = hose->pci_io_size;
2799 phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
2800 phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
2801
Gavin Shanc35d2a82013-07-31 16:47:04 +08002802 /* Allocate aux data & arrays. We don't have IO ports on PHB3 */
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002803 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
2804 m32map_off = size;
Gavin Shane47747f2012-08-20 03:49:19 +00002805 size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
Gavin Shanc35d2a82013-07-31 16:47:04 +08002806 if (phb->type == PNV_PHB_IODA1) {
2807 iomap_off = size;
2808 size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
2809 }
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002810 pemap_off = size;
2811 size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
Michael Ellermane39f223f2014-11-18 16:47:35 +11002812 aux = memblock_virt_alloc(size, 0);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002813 phb->ioda.pe_alloc = aux;
2814 phb->ioda.m32_segmap = aux + m32map_off;
Gavin Shanc35d2a82013-07-31 16:47:04 +08002815 if (phb->type == PNV_PHB_IODA1)
2816 phb->ioda.io_segmap = aux + iomap_off;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002817 phb->ioda.pe_array = aux + pemap_off;
Gavin Shan36954dc2013-11-04 16:32:47 +08002818 set_bit(phb->ioda.reserved_pe, phb->ioda.pe_alloc);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002819
Gavin Shan7ebdf952012-08-20 03:49:15 +00002820 INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002821 INIT_LIST_HEAD(&phb->ioda.pe_list);
Wei Yang781a8682015-03-25 16:23:57 +08002822 mutex_init(&phb->ioda.pe_list_mutex);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002823
2824 /* Calculate how many 32-bit TCE segments we have */
2825 phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
2826
Gavin Shanaa0c0332013-04-25 19:20:57 +00002827#if 0 /* We should really do that ... */
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002828 rc = opal_pci_set_phb_mem_window(opal->phb_id,
2829 window_type,
2830 window_num,
2831 starting_real_address,
2832 starting_pci_address,
2833 segment_size);
2834#endif
2835
Guo Chao262af552014-07-21 14:42:30 +10002836 pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
2837 phb->ioda.total_pe, phb->ioda.reserved_pe,
2838 phb->ioda.m32_size, phb->ioda.m32_segsize);
2839 if (phb->ioda.m64_size)
2840 pr_info(" M64: 0x%lx [segment=0x%lx]\n",
2841 phb->ioda.m64_size, phb->ioda.m64_segsize);
2842 if (phb->ioda.io_size)
2843 pr_info(" IO: 0x%x [segment=0x%x]\n",
2844 phb->ioda.io_size, phb->ioda.io_segsize);
2845
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002846
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002847 phb->hose->ops = &pnv_pci_ops;
Gavin Shan49dec922014-07-21 14:42:33 +10002848 phb->get_pe_state = pnv_ioda_get_pe_state;
2849 phb->freeze_pe = pnv_ioda_freeze_pe;
2850 phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002851
2852 /* Setup RID -> PE mapping function */
2853 phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;
2854
2855 /* Setup TCEs */
2856 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
Gavin Shanfe7e85c2014-09-30 12:39:10 +10002857 phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002858
2859 /* Setup MSI support */
2860 pnv_pci_init_ioda_msis(phb);
2861
Gavin Shanc40a4212012-08-20 03:49:20 +00002862 /*
2863 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
2864	 * to let the PCI core do resource assignment. The assumption
2865	 * is that the PCI core will do correct I/O and MMIO alignment
2866	 * for the P2P bridge BARs so that each PCI bus (excluding
2867	 * the child P2P bridges) can form an individual PE.
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002868 */
Gavin Shanfb446ad2012-08-20 03:49:14 +00002869 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
Daniel Axtens92ae0352015-04-28 15:12:05 +10002870 hose->controller_ops = pnv_pci_ioda_controller_ops;
Michael Ellermanad30cb92015-04-14 09:29:23 +10002871
Wei Yang6e628c72015-03-25 16:23:55 +08002872#ifdef CONFIG_PCI_IOV
2873 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
Wei Yang5350ab32015-03-25 16:23:56 +08002874 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
Michael Ellermanad30cb92015-04-14 09:29:23 +10002875#endif
2876
Gavin Shanc40a4212012-08-20 03:49:20 +00002877 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002878
2879 /* Reset IODA tables to a clean state */
Gavin Shand1a85ee2014-09-30 12:39:05 +10002880 rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002881 if (rc)
Benjamin Herrenschmidtf11fe552011-11-29 18:22:50 +00002882 pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
Gavin Shan361f2a22014-04-24 18:00:25 +10002883
2884	/* If we're running in the kdump kernel, the previous kernel never
2885	 * shut down PCI devices correctly. The IODA table has already
2886	 * been cleaned out, so we have to issue a PHB reset to stop all
2887	 * PCI transactions from the previous kernel.
2888	 */
2889 if (is_kdump_kernel()) {
2890 pr_info(" Issue PHB reset ...\n");
Gavin Shancadf3642015-02-16 14:45:47 +11002891 pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
2892 pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
Gavin Shan361f2a22014-04-24 18:00:25 +10002893 }
Guo Chao262af552014-07-21 14:42:30 +10002894
Gavin Shan9e9e8932014-11-12 13:36:05 +11002895 /* Remove M64 resource if we can't configure it successfully */
2896 if (!phb->init_m64 || phb->init_m64(phb))
Guo Chao262af552014-07-21 14:42:30 +10002897 hose->mem_resources[1].flags = 0;
Gavin Shanaa0c0332013-04-25 19:20:57 +00002898}
2899
Bjorn Helgaas67975002013-07-02 12:20:03 -06002900void __init pnv_pci_init_ioda2_phb(struct device_node *np)
Gavin Shanaa0c0332013-04-25 19:20:57 +00002901{
Gavin Shane9cc17d2013-06-20 13:21:14 +08002902 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002903}
2904
2905void __init pnv_pci_init_ioda_hub(struct device_node *np)
2906{
2907 struct device_node *phbn;
Alistair Popplec681b932013-09-23 12:04:57 +10002908 const __be64 *prop64;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002909 u64 hub_id;
2910
2911 pr_info("Probing IODA IO-Hub %s\n", np->full_name);
2912
2913 prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
2914 if (!prop64) {
2915 pr_err(" Missing \"ibm,opal-hubid\" property !\n");
2916 return;
2917 }
2918 hub_id = be64_to_cpup(prop64);
2919 pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
2920
2921 /* Count child PHBs */
2922 for_each_child_of_node(np, phbn) {
2923 /* Look for IODA1 PHBs */
2924 if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
Gavin Shane9cc17d2013-06-20 13:21:14 +08002925 pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002926 }
2927}