/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debugfs.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"

#define PNV_IODA1_M64_NUM	16	/* Number of M64 BARs		*/
#define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR		*/
#define PNV_IODA1_DMA32_SEGSIZE	0x10000000

#define POWERNV_IOMMU_DEFAULT_LEVELS	1
#define POWERNV_IOMMU_MAX_LEVELS	5

static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU" };
static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);

void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV */

	printk("%spci %s: [PE# %.2x] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

static bool pnv_iommu_bypass_disabled __read_mostly;

static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);
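
/*
 * Example (illustrative, not part of the original source): booting with
 * "iommu=nobypass" on the kernel command line matches the strncmp() above,
 * sets pnv_iommu_bypass_disabled and prints the message; any other
 * comma-separated "iommu=" tokens are simply skipped by this parser.
 */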

static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
{
	/*
	 * WARNING: We cannot rely on the resource flags. The Linux PCI
	 * allocation code sometimes decides to put a 64-bit prefetchable
	 * BAR in the 32-bit window, so we have to compare the addresses.
	 *
	 * For simplicity we only test resource start.
	 */
	return (r->start >= phb->ioda.m64_base &&
		r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
}

static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
{
	unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);

	return (resource_flags & flags) == flags;
}

static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
	s64 rc;

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;

	/*
	 * Clear the PE frozen state as it might be put into frozen state
	 * in the last PCI remove path. It's not harmful to do so when the
	 * PE is already in unfrozen state.
	 */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
		pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);

	return &phb->ioda.pe_array[pe_no];
}

static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
		pr_warn("%s: Invalid PE %x on PHB#%x\n",
			__func__, pe_no, phb->hose->global_number);
		return;
	}

	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
		pr_debug("%s: PE %x was reserved on PHB#%x\n",
			 __func__, pe_no, phb->hose->global_number);

	pnv_ioda_init_pe(phb, pe_no);
}

static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	long pe;

	for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
		if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
			return pnv_ioda_init_pe(phb, pe);
	}

	return NULL;
}

static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = pe->phb;
	unsigned int pe_num = pe->pe_number;

	WARN_ON(pe->pdev);

	memset(pe, 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe_num, phb->ioda.pe_alloc);
}

/* The default M64 BAR is shared by all PEs */
static int pnv_ioda2_init_m64(struct pnv_phb *phb)
{
	const char *desc;
	struct resource *r;
	s64 rc;

	/* Configure the default M64 BAR */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 phb->ioda.m64_bar_idx,
					 phb->ioda.m64_base,
					 0, /* unused */
					 phb->ioda.m64_size);
	if (rc != OPAL_SUCCESS) {
		desc = "configuring";
		goto fail;
	}

	/* Enable the default M64 BAR */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      phb->ioda.m64_bar_idx,
				      OPAL_ENABLE_M64_SPLIT);
	if (rc != OPAL_SUCCESS) {
		desc = "enabling";
		goto fail;
	}

	/*
	 * Exclude the segments for reserved and root bus PE, which
	 * are first or last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		pr_warn(" Cannot strip M64 segment for reserved PE#%x\n",
			phb->ioda.reserved_pe_idx);

	return 0;

fail:
	pr_warn(" Failure %lld %s M64 BAR#%d\n",
		rc, desc, phb->ioda.m64_bar_idx);
	opal_pci_phb_mmio_enable(phb->opal_id,
				 OPAL_M64_WINDOW_TYPE,
				 phb->ioda.m64_bar_idx,
				 OPAL_DISABLE_M64);
	return -EIO;
}

static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
					unsigned long *pe_bitmap)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct resource *r;
	resource_size_t base, sgsz, start, end;
	int segno, i;

	base = phb->ioda.m64_base;
	sgsz = phb->ioda.m64_segsize;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		r = &pdev->resource[i];
		if (!r->parent || !pnv_pci_is_m64(phb, r))
			continue;

		start = _ALIGN_DOWN(r->start - base, sgsz);
		end = _ALIGN_UP(r->end - base, sgsz);
		for (segno = start / sgsz; segno < end / sgsz; segno++) {
			if (pe_bitmap)
				set_bit(segno, pe_bitmap);
			else
				pnv_ioda_reserve_pe(phb, segno);
		}
	}
}

static int pnv_ioda1_init_m64(struct pnv_phb *phb)
{
	struct resource *r;
	int index;

	/*
	 * There are 16 M64 BARs, each of which has 8 segments. So
	 * there are as many M64 segments as the maximum number of
	 * PEs, which is 128.
	 */
	for (index = 0; index < PNV_IODA1_M64_NUM; index++) {
		unsigned long base, segsz = phb->ioda.m64_segsize;
		int64_t rc;

		base = phb->ioda.m64_base +
		       index * PNV_IODA1_M64_SEGS * segsz;
		rc = opal_pci_set_phb_mem_window(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index, base, 0,
				PNV_IODA1_M64_SEGS * segsz);
		if (rc != OPAL_SUCCESS) {
			pr_warn(" Error %lld setting M64 PHB#%x-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}

		rc = opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, index,
				OPAL_ENABLE_M64_SPLIT);
		if (rc != OPAL_SUCCESS) {
			pr_warn(" Error %lld enabling M64 PHB#%x-BAR#%d\n",
				rc, phb->hose->global_number, index);
			goto fail;
		}
	}

	/*
	 * Exclude the segments for reserved and root bus PE, which
	 * are first or last two PEs.
	 */
	r = &phb->hose->mem_resources[1];
	if (phb->ioda.reserved_pe_idx == 0)
		r->start += (2 * phb->ioda.m64_segsize);
	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
		r->end -= (2 * phb->ioda.m64_segsize);
	else
		WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
		     phb->ioda.reserved_pe_idx, phb->hose->global_number);

	return 0;

fail:
	for ( ; index >= 0; index--)
		opal_pci_phb_mmio_enable(phb->opal_id,
			OPAL_M64_WINDOW_TYPE, index, OPAL_DISABLE_M64);

	return -EIO;
}

static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
				    unsigned long *pe_bitmap,
				    bool all)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list) {
		pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap);

		if (all && pdev->subordinate)
			pnv_ioda_reserve_m64_pe(pdev->subordinate,
						pe_bitmap, all);
	}
}

static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *master_pe, *pe;
	unsigned long size, *pe_alloc;
	int i;

	/* Root bus shouldn't use M64 */
	if (pci_is_root_bus(bus))
		return NULL;

	/* Allocate bitmap */
	size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
	pe_alloc = kzalloc(size, GFP_KERNEL);
	if (!pe_alloc) {
		pr_warn("%s: Out of memory !\n",
			__func__);
		return NULL;
	}

	/* Figure out reserved PE numbers by the PE */
	pnv_ioda_reserve_m64_pe(bus, pe_alloc, all);

	/*
	 * the current bus might not own M64 window and that's all
	 * contributed by its child buses. For the case, we needn't
	 * pick M64 dependent PE#.
	 */
	if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) {
		kfree(pe_alloc);
		return NULL;
	}

	/*
	 * Figure out the master PE and put all slave PEs to master
	 * PE's list to form compound PE.
	 */
	master_pe = NULL;
	i = -1;
	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) <
		phb->ioda.total_pe_num) {
		pe = &phb->ioda.pe_array[i];

		phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number;
		if (!master_pe) {
			pe->flags |= PNV_IODA_PE_MASTER;
			INIT_LIST_HEAD(&pe->slaves);
			master_pe = pe;
		} else {
			pe->flags |= PNV_IODA_PE_SLAVE;
			pe->master = master_pe;
			list_add_tail(&pe->list, &master_pe->slaves);
		}

		/*
		 * P7IOC supports M64DT, which helps mapping M64 segment
		 * to one particular PE#. However, PHB3 has fixed mapping
		 * between M64 segment and PE#. In order to have same logic
		 * for P7IOC and PHB3, we enforce fixed mapping between M64
		 * segment and PE# on P7IOC.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			int64_t rc;

			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M64_WINDOW_TYPE,
					pe->pe_number / PNV_IODA1_M64_SEGS,
					pe->pe_number % PNV_IODA1_M64_SEGS);
			if (rc != OPAL_SUCCESS)
				pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
					__func__, rc, phb->hose->global_number,
					pe->pe_number);
		}
	}

	kfree(pe_alloc);
	return master_pe;
}

static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	struct resource *res;
	u32 m64_range[2], i;
	const __be32 *r;
	u64 pci_addr;

	if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) {
		pr_info(" Not support M64 window\n");
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_info(" Firmware too old to support M64 window\n");
		return;
	}

	r = of_get_property(dn, "ibm,opal-m64-window", NULL);
	if (!r) {
		pr_info(" No <ibm,opal-m64-window> on %s\n",
			dn->full_name);
		return;
	}
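
	/*
	 * Layout of "ibm,opal-m64-window" as consumed below (inferred from
	 * the parsing code rather than a binding document): two cells of PCI
	 * address, two cells of parent-bus address (translated to a CPU
	 * address via of_translate_address()), then two cells of window size.
	 */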

	/*
	 * Find the available M64 BAR range and pick up the last one for
	 * covering the whole 64-bits space. We support only one range.
	 */
	if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
				       m64_range, 2)) {
		/* In absence of the property, assume 0..15 */
		m64_range[0] = 0;
		m64_range[1] = 16;
	}
	/* We only support 64 bits in our allocator */
	if (m64_range[1] > 63) {
		pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
			__func__, m64_range[1], phb->hose->global_number);
		m64_range[1] = 63;
	}
	/* Empty range, no m64 */
	if (m64_range[1] <= m64_range[0]) {
		pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
			__func__, phb->hose->global_number);
		return;
	}

	/* Configure M64 information */
	res = &hose->mem_resources[1];
	res->name = dn->full_name;
	res->start = of_translate_address(dn, r + 2);
	res->end = res->start + of_read_number(r + 4, 2) - 1;
	res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
	pci_addr = of_read_number(r, 2);
	hose->mem_offset[1] = res->start - pci_addr;

	phb->ioda.m64_size = resource_size(res);
	phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
	phb->ioda.m64_base = pci_addr;

	/* This lines up nicely with the display from processing OF ranges */
	pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
		res->start, res->end, pci_addr, m64_range[0],
		m64_range[0] + m64_range[1] - 1);

	/* Mark all M64 used up by default */
	phb->ioda.m64_bar_alloc = (unsigned long)-1;

	/* Use last M64 BAR to cover M64 window */
	m64_range[1]--;
	phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];

	pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);

	/* Mark remaining ones free */
	for (i = m64_range[0]; i < m64_range[1]; i++)
		clear_bit(i, &phb->ioda.m64_bar_alloc);

	/*
	 * Setup init functions for M64 based on IODA version, IODA3 uses
	 * the IODA2 code.
	 */
	if (phb->type == PNV_PHB_IODA1)
		phb->init_m64 = pnv_ioda1_init_m64;
	else
		phb->init_m64 = pnv_ioda2_init_m64;
	phb->reserve_m64_pe = pnv_ioda_reserve_m64_pe;
	phb->pick_m64_pe = pnv_ioda_pick_m64_pe;
}

static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no];
	struct pnv_ioda_pe *slave;
	s64 rc;

	/* Fetch master PE */
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
			return;

		pe_no = pe->pe_number;
	}

	/* Freeze master PE */
	rc = opal_pci_eeh_freeze_set(phb->opal_id,
				     pe_no,
				     OPAL_EEH_ACTION_SET_FREEZE_ALL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
			__func__, rc, phb->hose->global_number, pe_no);
		return;
	}

	/* Freeze slave PEs */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_set(phb->opal_id,
					     slave->pe_number,
					     OPAL_EEH_ACTION_SET_FREEZE_ALL);
		if (rc != OPAL_SUCCESS)
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				slave->pe_number);
	}
}

static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
	struct pnv_ioda_pe *pe, *slave;
	s64 rc;

	/* Find master PE */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Clear frozen state for master PE */
	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
			__func__, rc, opt, phb->hose->global_number, pe_no);
		return -EIO;
	}

	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Clear frozen state for slave PEs */
	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       slave->pe_number,
					       opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n",
				__func__, rc, opt, phb->hose->global_number,
				slave->pe_number);
			return -EIO;
		}
	}

	return 0;
}

static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
	struct pnv_ioda_pe *slave, *pe;
	u8 fstate, state;
	__be16 pcierr;
	s64 rc;

	/* Sanity check on PE number */
	if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num)
		return OPAL_EEH_STOPPED_PERM_UNAVAIL;

	/*
	 * Fetch the master PE; the PE instance might not be
	 * initialized yet.
	 */
	pe = &phb->ioda.pe_array[pe_no];
	if (pe->flags & PNV_IODA_PE_SLAVE) {
		pe = pe->master;
		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pe->pe_number;
	}

	/* Check the master PE */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
					&state, &pcierr, NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting "
			"PHB#%x-PE#%x state\n",
			__func__, rc,
			phb->hose->global_number, pe_no);
		return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
	}

	/* Check the slave PE */
	if (!(pe->flags & PNV_IODA_PE_MASTER))
		return state;

	list_for_each_entry(slave, &pe->slaves, list) {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						slave->pe_number,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting "
				"PHB#%x-PE#%x state\n",
				__func__, rc,
				phb->hose->global_number, slave->pe_number);
			return OPAL_EEH_STOPPED_TEMP_UNAVAIL;
		}

		/*
		 * Override the result based on the ascending
		 * priority.
		 */
		if (fstate > state)
			state = fstate;
	}

	return state;
}

/* Currently those 2 are only used when MSIs are enabled, this will change
 * but in the meantime, we need to protect them to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */

static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
				  struct pnv_ioda_pe *parent,
				  struct pnv_ioda_pe *child,
				  bool is_add)
{
	const char *desc = is_add ? "adding" : "removing";
	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
			      OPAL_REMOVE_PE_FROM_DOMAIN;
	struct pnv_ioda_pe *slave;
	long rc;

	/* Parent PE affects child PE */
	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
				child->pe_number, op);
	if (rc != OPAL_SUCCESS) {
		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
			rc, desc);
		return -ENXIO;
	}

	if (!(child->flags & PNV_IODA_PE_MASTER))
		return 0;

	/* Compound case: parent PE affects slave PEs */
	list_for_each_entry(slave, &child->slaves, list) {
		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
					slave->pe_number, op);
		if (rc != OPAL_SUCCESS) {
			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
				rc, desc);
			return -ENXIO;
		}
	}

	return 0;
}

static int pnv_ioda_set_peltv(struct pnv_phb *phb,
			      struct pnv_ioda_pe *pe,
			      bool is_add)
{
	struct pnv_ioda_pe *slave;
	struct pci_dev *pdev = NULL;
	int ret;

	/*
	 * Clear PE frozen state. If it's master PE, we need to
	 * clear slave PE frozen state as well.
	 */
	if (is_add) {
		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (pe->flags & PNV_IODA_PE_MASTER) {
			list_for_each_entry(slave, &pe->slaves, list)
				opal_pci_eeh_freeze_clear(phb->opal_id,
					slave->pe_number,
					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		}
	}

	/*
	 * Associate PE in PELT. We need to add the PE into the
	 * corresponding PELT-V as well. Otherwise, the error
	 * originated from the PE might contribute to other
	 * PEs.
	 */
	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
	if (ret)
		return ret;

	/* For compound PEs, any one affects all of them */
	if (pe->flags & PNV_IODA_PE_MASTER) {
		list_for_each_entry(slave, &pe->slaves, list) {
			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
			if (ret)
				return ret;
		}
	}

	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
		pdev = pe->pbus->self;
	else if (pe->flags & PNV_IODA_PE_DEV)
		pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		pdev = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
	while (pdev) {
		struct pci_dn *pdn = pci_get_pdn(pdev);
		struct pnv_ioda_pe *parent;

		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			parent = &phb->ioda.pe_array[pdn->pe_number];
			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
			if (ret)
				return ret;
		}

		pdev = pdev->bus->self;
	}

	return 0;
}

static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	int64_t rc;
	long rid_end, rid;

	/* Currently, we just deconfigure VF PE. Bus PE will always be there. */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch(count) {
		case 1: bcomp = OpalPciBusAll; break;
		case 2: bcomp = OpalPciBus7Bits; break;
		case 4: bcomp = OpalPciBus6Bits; break;
		case 8: bcomp = OpalPciBus5Bits; break;
		case 16: bcomp = OpalPciBus4Bits; break;
		case 32: bcomp = OpalPciBus3Bits; break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Clear the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;

	/* Release from all parents PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
					pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Disassociate PE in PELT */
	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
				pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
	if (rc)
		pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
	if (rc)
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);

	pe->pbus = NULL;
	pe->pdev = NULL;
#ifdef CONFIG_PCI_IOV
	pe->parent_dev = NULL;
#endif

	return 0;
}

static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch(count) {
		case 1: bcomp = OpalPciBusAll; break;
		case 2: bcomp = OpalPciBus7Bits; break;
		case 4: bcomp = OpalPciBus6Bits; break;
		case 8: bcomp = OpalPciBus5Bits; break;
		case 16: bcomp = OpalPciBus4Bits; break;
		case 32: bcomp = OpalPciBus3Bits; break;
		default:
			dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
				count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
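		/*
		 * Note (inferred from the switch above and the rid_end
		 * computation below, not from OPAL documentation): bcomp
		 * selects how many bus-number bits OPAL compares when
		 * matching RIDs, so a PE spanning "count" subordinate buses
		 * claims RIDs pe->rid .. pe->rid + (count << 8) - 1.
		 */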
		rid_end = pe->rid + (count << 8);
	} else {
#ifdef CONFIG_PCI_IOV
		if (pe->flags & PNV_IODA_PE_VF)
			parent = pe->parent_dev;
		else
#endif /* CONFIG_PCI_IOV */
			parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/*
	 * Associate PE in PELT. We need to add the PE into the
	 * corresponding PELT-V as well. Otherwise, the error
	 * originated from the PE might contribute to other
	 * PEs.
	 */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}

	/*
	 * Configure PELTV. NPUs don't have a PELTV table so skip
	 * configuration on them.
	 */
	if (phb->type != PNV_PHB_NPU)
		pnv_ioda_set_peltv(phb, pe, true);

	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVT on IODA1 */
	if (phb->type != PNV_PHB_IODA1) {
		pe->mve_number = 0;
		goto out;
	}

	pe->mve_number = pe->pe_number;
	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
	if (rc != OPAL_SUCCESS) {
		pe_err(pe, "OPAL error %ld setting up MVE %x\n",
		       rc, pe->mve_number);
		pe->mve_number = -1;
	} else {
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
		if (rc) {
			pe_err(pe, "OPAL error %ld enabling MVE %x\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		}
	}

out:
	return 0;
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
	struct pci_dn *pdn = pci_get_pdn(dev);
	int i;
	struct resource *res, res2;
	resource_size_t size;
	u16 num_vfs;

	if (!dev->is_physfn)
		return -EINVAL;

	/*
	 * "offset" is in VFs. The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number. Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
	num_vfs = pdn->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the actual size for num_vfs VFs BAR. This check is to
		 * make sure that after shifting, the range will not overlap
		 * with another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);
			return -EBUSY;
		}
	}

	/*
	 * Shifting the resource start leaves a "hole" in /proc/iomem when
	 * "offset" is positive: the device effectively hands some MMIO space
	 * back to the system, which nothing can actually use.
	 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2 = *res;
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
			 i, &res2, res, (offset > 0) ? "En" : "Dis",
			 num_vfs, offset);
		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
	}
	return 0;
}
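
/*
 * Worked example with illustrative numbers (not taken from the source):
 * with a per-VF segment size of 0x800000 and offset = 3, a VF BAR starting
 * at 0x3fe000000000 moves to 0x3fe001800000 (start + size * offset), and the
 * check above requires start + size * (offset + num_vfs) - 1 to stay within
 * the original resource end.
 */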
#endif /* CONFIG_PCI_IOV */

static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	pe = pnv_ioda_alloc_pe(phb);
	if (!pe) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe->pe_number;
	pe->flags = PNV_IODA_PE_DEV;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}

static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}

		/*
		 * In partial hotplug case, the PCI device might be still
		 * associated with the PE and needn't attach it to the PE
		 * again.
		 */
		if (pdn->pe_number != IODA_INVALID_PE)
			continue;

		pe->device_count++;
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}

/*
 * There're 2 types of PCI bus sensitive PEs: One that is comprised of a
 * single PCI bus. Another one that contains the primary PCI bus and its
 * subordinate PCI devices and buses. The second type of PE is normally
 * originated by PCIe-to-PCI bridge or PLX switch downstream ports.
 */
static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe = NULL;
	unsigned int pe_num;

	/*
	 * In partial hotplug case, the PE instance might be still alive.
	 * We should reuse it instead of allocating a new one.
	 */
	pe_num = phb->ioda.pe_rmap[bus->number << 8];
	if (pe_num != IODA_INVALID_PE) {
		pe = &phb->ioda.pe_array[pe_num];
		pnv_ioda_setup_same_PE(bus, pe);
		return NULL;
	}

	/* PE number for root bus should have been reserved */
	if (pci_is_root_bus(bus) &&
	    phb->ioda.root_pe_idx != IODA_INVALID_PE)
		pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];

	/* Check if PE is determined by M64 */
	if (!pe && phb->pick_m64_pe)
		pe = phb->pick_m64_pe(bus, all);

	/* The PE number isn't pinned by M64 */
	if (!pe)
		pe = pnv_ioda_alloc_pe(phb);

	if (!pe) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return NULL;
	}

	pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%x\n",
			bus->busn_res.start, bus->busn_res.end, pe->pe_number);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%x\n",
			bus->busn_res.start, pe->pe_number);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(pe);
		pe->pbus = NULL;
		return NULL;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	return pe;
}

static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
{
	int pe_num, found_pe = false, rc;
	long rid;
	struct pnv_ioda_pe *pe;
	struct pci_dev *gpu_pdev;
	struct pci_dn *npu_pdn;
	struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	/*
	 * Due to a hardware errata PE#0 on the NPU is reserved for
	 * error handling. This means we only have three PEs remaining
	 * which need to be assigned to four links, implying some
	 * links must share PEs.
	 *
	 * To achieve this we assign PEs such that NPUs linking the
	 * same GPU get assigned the same PE.
	 */
	gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
		pe = &phb->ioda.pe_array[pe_num];
		if (!pe->pdev)
			continue;

		if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
			/*
			 * This device has the same peer GPU so should
			 * be assigned the same PE as the existing
			 * peer NPU.
			 */
			dev_info(&npu_pdev->dev,
				 "Associating to existing PE %x\n", pe_num);
			pci_dev_get(npu_pdev);
			npu_pdn = pci_get_pdn(npu_pdev);
			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
			npu_pdn->pcidev = npu_pdev;
			npu_pdn->pe_number = pe_num;
			phb->ioda.pe_rmap[rid] = pe->pe_number;

			/* Map the PE to this link */
			rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
					     OpalPciBusAll,
					     OPAL_COMPARE_RID_DEVICE_NUMBER,
					     OPAL_COMPARE_RID_FUNCTION_NUMBER,
					     OPAL_MAP_PE);
			WARN_ON(rc != OPAL_SUCCESS);
			found_pe = true;
			break;
		}
	}

	if (!found_pe)
		/*
		 * Could not find an existing PE so allocate a new
		 * one.
		 */
		return pnv_ioda_setup_dev_PE(npu_pdev);
	else
		return pe;
}

static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
{
	struct pci_dev *pdev;

	list_for_each_entry(pdev, &bus->devices, bus_list)
		pnv_ioda_setup_npu_PE(pdev);
}

static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		if (phb->type == PNV_PHB_NPU) {
			/* PE#0 is needed for error reporting */
			pnv_ioda_reserve_pe(phb, 0);
			pnv_ioda_setup_npu_PEs(hose->bus);
			if (phb->model == PNV_PHB_MODEL_NPU2)
				pnv_npu2_init(phb);
		}
	}
}

#ifdef CONFIG_PCI_IOV
static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int i, j;
	int m64_bars;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);

	if (pdn->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
		for (j = 0; j < m64_bars; j++) {
			if (pdn->m64_map[j][i] == IODA_INVALID_M64)
				continue;
			opal_pci_phb_mmio_enable(phb->opal_id,
				OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0);
			clear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc);
			pdn->m64_map[j][i] = IODA_INVALID_M64;
		}

	kfree(pdn->m64_map);
	return 0;
}

static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pci_bus *bus;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	unsigned int win;
	struct resource *res;
	int i, j;
	int64_t rc;
	int total_vfs;
	resource_size_t size, start;
	int pe_num;
	int m64_bars;

	bus = pdev->bus;
	hose = pci_bus_to_host(bus);
	phb = hose->private_data;
	pdn = pci_get_pdn(pdev);
	total_vfs = pci_sriov_get_totalvfs(pdev);

	if (pdn->m64_single_mode)
		m64_bars = num_vfs;
	else
		m64_bars = 1;

	pdn->m64_map = kmalloc_array(m64_bars,
				     sizeof(*pdn->m64_map),
				     GFP_KERNEL);
	if (!pdn->m64_map)
		return -ENOMEM;
	/* Initialize the m64_map to IODA_INVALID_M64 */
	for (i = 0; i < m64_bars ; i++)
		for (j = 0; j < PCI_SRIOV_NUM_BARS; j++)
			pdn->m64_map[i][j] = IODA_INVALID_M64;


	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		for (j = 0; j < m64_bars; j++) {
			do {
				win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
						phb->ioda.m64_bar_idx + 1, 0);

				if (win >= phb->ioda.m64_bar_idx + 1)
					goto m64_failed;
			} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

			pdn->m64_map[j][i] = win;

			if (pdn->m64_single_mode) {
				size = pci_iov_resource_size(pdev,
						PCI_IOV_RESOURCES + i);
				start = res->start + size * j;
			} else {
				size = resource_size(res);
				start = res->start;
			}

			/* Map the M64 here */
			if (pdn->m64_single_mode) {
				pe_num = pdn->pe_num_map[j];
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
						pe_num, OPAL_M64_WINDOW_TYPE,
						pdn->m64_map[j][i], 0);
			}

			rc = opal_pci_set_phb_mem_window(phb->opal_id,
						OPAL_M64_WINDOW_TYPE,
						pdn->m64_map[j][i],
						start,
						0, /* unused */
						size);


			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
					win, rc);
				goto m64_failed;
			}

			if (pdn->m64_single_mode)
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
					OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2);
			else
				rc = opal_pci_phb_mmio_enable(phb->opal_id,
					OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1);

			if (rc != OPAL_SUCCESS) {
				dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
					win, rc);
				goto m64_failed;
			}
		}
	}
	return 0;

m64_failed:
	pnv_pci_vf_release_m64(pdev, num_vfs);
	return -EBUSY;
}
1408
Alexey Kardashevskiyc035e372015-06-05 16:35:21 +10001409static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
1410 int num);
1411static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
1412
Wei Yang781a8682015-03-25 16:23:57 +08001413static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
1414{
Wei Yang781a8682015-03-25 16:23:57 +08001415 struct iommu_table *tbl;
Wei Yang781a8682015-03-25 16:23:57 +08001416 int64_t rc;
1417
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001418 tbl = pe->table_group.tables[0];
Alexey Kardashevskiyc035e372015-06-05 16:35:21 +10001419 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
Wei Yang781a8682015-03-25 16:23:57 +08001420 if (rc)
1421		pe_warn(pe, "OPAL error %ld releasing DMA window\n", rc);
1422
Alexey Kardashevskiyc035e372015-06-05 16:35:21 +10001423 pnv_pci_ioda2_set_bypass(pe, false);
Alexey Kardashevskiy0eaf4de2015-06-05 16:35:09 +10001424 if (pe->table_group.group) {
1425 iommu_group_put(pe->table_group.group);
1426 BUG_ON(pe->table_group.group);
Alexey Kardashevskiyac9a5882015-06-05 16:34:56 +10001427 }
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10001428 pnv_pci_ioda2_table_free_pages(tbl);
Wei Yang781a8682015-03-25 16:23:57 +08001429 iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
Wei Yang781a8682015-03-25 16:23:57 +08001430}
1431
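/*
 * Release every VF PE that was created for this physical function:
 * tear down the PE's DMA window, drop it from the PHB's PE list,
 * deconfigure it in hardware and return the PE number to the allocator.
 */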
Wei Yangee8222f2015-10-22 09:22:16 +08001432static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
Wei Yang781a8682015-03-25 16:23:57 +08001433{
1434 struct pci_bus *bus;
1435 struct pci_controller *hose;
1436 struct pnv_phb *phb;
1437 struct pnv_ioda_pe *pe, *pe_n;
1438 struct pci_dn *pdn;
1439
1440 bus = pdev->bus;
1441 hose = pci_bus_to_host(bus);
1442 phb = hose->private_data;
Wei Yang02639b02015-03-25 16:23:59 +08001443 pdn = pci_get_pdn(pdev);
Wei Yang781a8682015-03-25 16:23:57 +08001444
1445 if (!pdev->is_physfn)
1446 return;
1447
Wei Yang781a8682015-03-25 16:23:57 +08001448 list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
1449 if (pe->parent_dev != pdev)
1450 continue;
1451
1452 pnv_pci_ioda2_release_dma_pe(pdev, pe);
1453
1454 /* Remove from list */
1455 mutex_lock(&phb->ioda.pe_list_mutex);
1456 list_del(&pe->list);
1457 mutex_unlock(&phb->ioda.pe_list_mutex);
1458
1459 pnv_ioda_deconfigure_pe(phb, pe);
1460
Gavin Shan1e916772016-05-03 15:41:36 +10001461 pnv_ioda_free_pe(pe);
Wei Yang781a8682015-03-25 16:23:57 +08001462 }
1463}
1464
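/*
 * Undo pnv_pci_sriov_enable(): release the VF PEs, shift the IOV BAR
 * back when a shared M64 BAR was used, release the M64 windows and
 * return the PE numbers before freeing pe_num_map.
 */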
1465void pnv_pci_sriov_disable(struct pci_dev *pdev)
1466{
1467 struct pci_bus *bus;
1468 struct pci_controller *hose;
1469 struct pnv_phb *phb;
Gavin Shan1e916772016-05-03 15:41:36 +10001470 struct pnv_ioda_pe *pe;
Wei Yang781a8682015-03-25 16:23:57 +08001471 struct pci_dn *pdn;
Wei Yangbe283ee2015-10-22 09:22:19 +08001472 u16 num_vfs, i;
Wei Yang781a8682015-03-25 16:23:57 +08001473
1474 bus = pdev->bus;
1475 hose = pci_bus_to_host(bus);
1476 phb = hose->private_data;
1477 pdn = pci_get_pdn(pdev);
Wei Yang781a8682015-03-25 16:23:57 +08001478 num_vfs = pdn->num_vfs;
1479
1480 /* Release VF PEs */
Wei Yangee8222f2015-10-22 09:22:16 +08001481 pnv_ioda_release_vf_PE(pdev);
Wei Yang781a8682015-03-25 16:23:57 +08001482
1483 if (phb->type == PNV_PHB_IODA2) {
Wei Yangee8222f2015-10-22 09:22:16 +08001484 if (!pdn->m64_single_mode)
Wei Yangbe283ee2015-10-22 09:22:19 +08001485 pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);
Wei Yang781a8682015-03-25 16:23:57 +08001486
1487 /* Release M64 windows */
Wei Yangee8222f2015-10-22 09:22:16 +08001488 pnv_pci_vf_release_m64(pdev, num_vfs);
Wei Yang781a8682015-03-25 16:23:57 +08001489
1490 /* Release PE numbers */
Wei Yangbe283ee2015-10-22 09:22:19 +08001491 if (pdn->m64_single_mode) {
1492 for (i = 0; i < num_vfs; i++) {
Gavin Shan1e916772016-05-03 15:41:36 +10001493 if (pdn->pe_num_map[i] == IODA_INVALID_PE)
1494 continue;
1495
1496 pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
1497 pnv_ioda_free_pe(pe);
Wei Yangbe283ee2015-10-22 09:22:19 +08001498 }
1499 } else
1500 bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
1501 /* Releasing pe_num_map */
1502 kfree(pdn->pe_num_map);
Wei Yang781a8682015-03-25 16:23:57 +08001503 }
1504}
1505
1506static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
1507 struct pnv_ioda_pe *pe);
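/*
 * Create one PE per VF: pick the PE number reserved earlier (one per VF
 * in single mode, a contiguous range otherwise), configure the PE in
 * hardware, add it to the PHB's PE list and set up its DMA window.
 */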
1508static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
1509{
1510 struct pci_bus *bus;
1511 struct pci_controller *hose;
1512 struct pnv_phb *phb;
1513 struct pnv_ioda_pe *pe;
1514 int pe_num;
1515 u16 vf_index;
1516 struct pci_dn *pdn;
1517
1518 bus = pdev->bus;
1519 hose = pci_bus_to_host(bus);
1520 phb = hose->private_data;
1521 pdn = pci_get_pdn(pdev);
1522
1523 if (!pdev->is_physfn)
1524 return;
1525
1526 /* Reserve PE for each VF */
1527 for (vf_index = 0; vf_index < num_vfs; vf_index++) {
Wei Yangbe283ee2015-10-22 09:22:19 +08001528 if (pdn->m64_single_mode)
1529 pe_num = pdn->pe_num_map[vf_index];
1530 else
1531 pe_num = *pdn->pe_num_map + vf_index;
Wei Yang781a8682015-03-25 16:23:57 +08001532
1533 pe = &phb->ioda.pe_array[pe_num];
1534 pe->pe_number = pe_num;
1535 pe->phb = phb;
1536 pe->flags = PNV_IODA_PE_VF;
1537 pe->pbus = NULL;
1538 pe->parent_dev = pdev;
Wei Yang781a8682015-03-25 16:23:57 +08001539 pe->mve_number = -1;
1540 pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
1541 pci_iov_virtfn_devfn(pdev, vf_index);
1542
Russell Currey1f52f172016-11-16 14:02:15 +11001543 pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
Wei Yang781a8682015-03-25 16:23:57 +08001544 hose->global_number, pdev->bus->number,
1545 PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
1546 PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
1547
1548 if (pnv_ioda_configure_pe(phb, pe)) {
1549			/* XXX What do we do here? */
Gavin Shan1e916772016-05-03 15:41:36 +10001550 pnv_ioda_free_pe(pe);
Wei Yang781a8682015-03-25 16:23:57 +08001551 pe->pdev = NULL;
1552 continue;
1553 }
1554
Wei Yang781a8682015-03-25 16:23:57 +08001555 /* Put PE to the list */
1556 mutex_lock(&phb->ioda.pe_list_mutex);
1557 list_add_tail(&pe->list, &phb->ioda.pe_list);
1558 mutex_unlock(&phb->ioda.pe_list_mutex);
1559
1560 pnv_pci_ioda2_setup_dma_pe(phb, pe);
1561 }
1562}
1563
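/*
 * Enable SR-IOV on IODA2: check that the IOV BARs were expanded and that
 * enough M64 BARs are available, reserve PE numbers for the VFs, assign
 * M64 windows, shift the IOV BAR when a shared M64 BAR is used, and
 * finally create the VF PEs.
 */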
1564int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
1565{
1566 struct pci_bus *bus;
1567 struct pci_controller *hose;
1568 struct pnv_phb *phb;
Gavin Shan1e916772016-05-03 15:41:36 +10001569 struct pnv_ioda_pe *pe;
Wei Yang781a8682015-03-25 16:23:57 +08001570 struct pci_dn *pdn;
1571 int ret;
Wei Yangbe283ee2015-10-22 09:22:19 +08001572 u16 i;
Wei Yang781a8682015-03-25 16:23:57 +08001573
1574 bus = pdev->bus;
1575 hose = pci_bus_to_host(bus);
1576 phb = hose->private_data;
1577 pdn = pci_get_pdn(pdev);
1578
1579 if (phb->type == PNV_PHB_IODA2) {
Wei Yangb0331852015-10-22 09:22:14 +08001580 if (!pdn->vfs_expanded) {
1581			dev_info(&pdev->dev, "SR-IOV is not supported on this device:"
1582				" IOV BAR is not 64-bit prefetchable\n");
1583 return -ENOSPC;
1584 }
1585
Wei Yangee8222f2015-10-22 09:22:16 +08001586 /*
1587		 * When the M64 BARs function in Single PE mode, the number of VFs
1588		 * that can be enabled must not exceed the number of M64 BARs.
1589 */
1590 if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
1591 dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n");
1592 return -EBUSY;
1593 }
1594
Wei Yangbe283ee2015-10-22 09:22:19 +08001595 /* Allocating pe_num_map */
1596 if (pdn->m64_single_mode)
Markus Elfringfb37e122016-08-24 22:26:37 +02001597 pdn->pe_num_map = kmalloc_array(num_vfs,
1598 sizeof(*pdn->pe_num_map),
1599 GFP_KERNEL);
Wei Yangbe283ee2015-10-22 09:22:19 +08001600 else
1601 pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);
1602
1603 if (!pdn->pe_num_map)
1604 return -ENOMEM;
1605
1606 if (pdn->m64_single_mode)
1607 for (i = 0; i < num_vfs; i++)
1608 pdn->pe_num_map[i] = IODA_INVALID_PE;
1609
Wei Yang781a8682015-03-25 16:23:57 +08001610 /* Calculate available PE for required VFs */
Wei Yangbe283ee2015-10-22 09:22:19 +08001611 if (pdn->m64_single_mode) {
1612 for (i = 0; i < num_vfs; i++) {
Gavin Shan1e916772016-05-03 15:41:36 +10001613 pe = pnv_ioda_alloc_pe(phb);
1614 if (!pe) {
Wei Yangbe283ee2015-10-22 09:22:19 +08001615 ret = -EBUSY;
1616 goto m64_failed;
1617 }
Gavin Shan1e916772016-05-03 15:41:36 +10001618
1619 pdn->pe_num_map[i] = pe->pe_number;
Wei Yangbe283ee2015-10-22 09:22:19 +08001620 }
1621 } else {
1622 mutex_lock(&phb->ioda.pe_alloc_mutex);
1623 *pdn->pe_num_map = bitmap_find_next_zero_area(
Gavin Shan92b8f132016-05-03 15:41:24 +10001624 phb->ioda.pe_alloc, phb->ioda.total_pe_num,
Wei Yangbe283ee2015-10-22 09:22:19 +08001625 0, num_vfs, 0);
Gavin Shan92b8f132016-05-03 15:41:24 +10001626 if (*pdn->pe_num_map >= phb->ioda.total_pe_num) {
Wei Yangbe283ee2015-10-22 09:22:19 +08001627 mutex_unlock(&phb->ioda.pe_alloc_mutex);
1628				dev_info(&pdev->dev, "Failed to enable %d VFs\n", num_vfs);
1629 kfree(pdn->pe_num_map);
1630 return -EBUSY;
1631 }
1632 bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
Wei Yang781a8682015-03-25 16:23:57 +08001633 mutex_unlock(&phb->ioda.pe_alloc_mutex);
Wei Yang781a8682015-03-25 16:23:57 +08001634 }
Wei Yang781a8682015-03-25 16:23:57 +08001635 pdn->num_vfs = num_vfs;
Wei Yang781a8682015-03-25 16:23:57 +08001636
1637 /* Assign M64 window accordingly */
Wei Yang02639b02015-03-25 16:23:59 +08001638 ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
Wei Yang781a8682015-03-25 16:23:57 +08001639 if (ret) {
1640 dev_info(&pdev->dev, "Not enough M64 window resources\n");
1641 goto m64_failed;
1642 }
1643
1644 /*
1645 * When using one M64 BAR to map one IOV BAR, we need to shift
1646 * the IOV BAR according to the PE# allocated to the VFs.
1647 * Otherwise, the PE# for the VF will conflict with others.
1648 */
Wei Yangee8222f2015-10-22 09:22:16 +08001649 if (!pdn->m64_single_mode) {
Wei Yangbe283ee2015-10-22 09:22:19 +08001650 ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);
Wei Yang02639b02015-03-25 16:23:59 +08001651 if (ret)
1652 goto m64_failed;
1653 }
Wei Yang781a8682015-03-25 16:23:57 +08001654 }
1655
1656 /* Setup VF PEs */
1657 pnv_ioda_setup_vf_PE(pdev, num_vfs);
1658
1659 return 0;
1660
1661m64_failed:
Wei Yangbe283ee2015-10-22 09:22:19 +08001662 if (pdn->m64_single_mode) {
1663 for (i = 0; i < num_vfs; i++) {
Gavin Shan1e916772016-05-03 15:41:36 +10001664 if (pdn->pe_num_map[i] == IODA_INVALID_PE)
1665 continue;
1666
1667 pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
1668 pnv_ioda_free_pe(pe);
Wei Yangbe283ee2015-10-22 09:22:19 +08001669 }
1670 } else
1671 bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
1672
1673 /* Releasing pe_num_map */
1674 kfree(pdn->pe_num_map);
Wei Yang781a8682015-03-25 16:23:57 +08001675
1676 return ret;
1677}
1678
Gavin Shana8b2f822015-03-25 16:23:52 +08001679int pcibios_sriov_disable(struct pci_dev *pdev)
1680{
Wei Yang781a8682015-03-25 16:23:57 +08001681 pnv_pci_sriov_disable(pdev);
1682
Gavin Shana8b2f822015-03-25 16:23:52 +08001683 /* Release PCI data */
1684 remove_dev_pci_data(pdev);
1685 return 0;
1686}
1687
1688int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
1689{
1690 /* Allocate PCI data */
1691 add_dev_pci_data(pdev);
Wei Yang781a8682015-03-25 16:23:57 +08001692
Wei Yangee8222f2015-10-22 09:22:16 +08001693 return pnv_pci_sriov_enable(pdev, num_vfs);
Gavin Shana8b2f822015-03-25 16:23:52 +08001694}
1695#endif /* CONFIG_PCI_IOV */
1696
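/*
 * Attach a device to its PE's default 32-bit TCE table and record the
 * 64-bit bypass offset. Only runs once the device's PE# has been assigned.
 */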
Gavin Shan959c9bd2013-04-25 19:21:02 +00001697static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001698{
Benjamin Herrenschmidtb72c1f62013-05-21 22:58:21 +00001699 struct pci_dn *pdn = pci_get_pdn(pdev);
Gavin Shan959c9bd2013-04-25 19:21:02 +00001700 struct pnv_ioda_pe *pe;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001701
Gavin Shan959c9bd2013-04-25 19:21:02 +00001702 /*
1703	 * This function can be called before the PE# has
1704	 * been assigned. Do nothing in that case.
1706 */
1707 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
1708 return;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001709
Gavin Shan959c9bd2013-04-25 19:21:02 +00001710 pe = &phb->ioda.pe_array[pdn->pe_number];
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001711 WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
Alexey Kardashevskiy0e1ffef2015-08-27 16:01:16 +10001712 set_dma_offset(&pdev->dev, pe->tce_bypass_base);
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001713 set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
Alexey Kardashevskiy46170822015-06-05 16:34:54 +10001714 /*
1715	 * Note: iommu_add_device() would fail here because,
1716	 * for a physical PE, the device has already been added by now;
1717	 * for a virtual PE, sysfs entries are not ready yet and
1718	 * tce_iommu_bus_notifier will add the device to a group later.
1719 */
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00001720}
1721
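/*
 * Pick the DMA ops for a device based on the requested mask: if the PE's
 * 64-bit bypass window covers the mask, use direct DMA, otherwise fall
 * back to the 32-bit IOMMU path. Peer NPU devices are updated to match.
 */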
Daniel Axtens763d2d82015-04-28 15:12:07 +10001722static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001723{
Daniel Axtens763d2d82015-04-28 15:12:07 +10001724 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
1725 struct pnv_phb *phb = hose->private_data;
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001726 struct pci_dn *pdn = pci_get_pdn(pdev);
1727 struct pnv_ioda_pe *pe;
1728 uint64_t top;
1729 bool bypass = false;
1730
1731 if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
1732		return -ENODEV;
1733
1734 pe = &phb->ioda.pe_array[pdn->pe_number];
1735 if (pe->tce_bypass_enabled) {
1736 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
1737 bypass = (dma_mask >= top);
1738 }
1739
1740 if (bypass) {
1741 dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
1742 set_dma_ops(&pdev->dev, &dma_direct_ops);
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001743 } else {
1744 dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
1745 set_dma_ops(&pdev->dev, &dma_iommu_ops);
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001746 }
Brian W Harta32305b2014-07-31 14:24:37 -05001747 *pdev->dev.dma_mask = dma_mask;
Alistair Popple5d2aa712015-12-17 13:43:13 +11001748
1749 /* Update peer npu devices */
Alexey Kardashevskiyf9f83452016-04-29 18:55:20 +10001750 pnv_npu_try_dma_set_bypass(pdev, bypass);
Alistair Popple5d2aa712015-12-17 13:43:13 +11001751
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11001752 return 0;
1753}
1754
Andrew Donnellan535229822015-08-07 13:45:54 +10001755static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
Gavin Shanfe7e85c2014-09-30 12:39:10 +10001756{
Andrew Donnellan535229822015-08-07 13:45:54 +10001757 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
1758 struct pnv_phb *phb = hose->private_data;
Gavin Shanfe7e85c2014-09-30 12:39:10 +10001759 struct pci_dn *pdn = pci_get_pdn(pdev);
1760 struct pnv_ioda_pe *pe;
1761 u64 end, mask;
1762
1763 if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
1764 return 0;
1765
1766 pe = &phb->ioda.pe_array[pdn->pe_number];
1767 if (!pe->tce_bypass_enabled)
1768 return __dma_get_required_mask(&pdev->dev);
1769
1770
1771 end = pe->tce_bypass_base + memblock_end_of_DRAM();
1772 mask = 1ULL << (fls64(end) - 1);
1773 mask += mask - 1;
1774
1775 return mask;
1776}
1777
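/*
 * Walk all devices on the bus (and, for "all buses" PEs, the subordinate
 * buses) and point each of them at the PE's TCE table, bypass offset and,
 * optionally, IOMMU group.
 */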
Gavin Shandff4a392014-07-15 17:00:55 +10001778static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
Alexey Kardashevskiydb08e1d2017-02-21 13:41:31 +11001779 struct pci_bus *bus,
1780 bool add_to_group)
Benjamin Herrenschmidt74251fe2013-07-01 17:54:09 +10001781{
1782 struct pci_dev *dev;
1783
1784 list_for_each_entry(dev, &bus->devices, bus_list) {
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001785 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
Benjamin Herrenschmidte91c25112015-06-24 15:25:27 +10001786 set_dma_offset(&dev->dev, pe->tce_bypass_base);
Alexey Kardashevskiydb08e1d2017-02-21 13:41:31 +11001787 if (add_to_group)
1788 iommu_add_device(&dev->dev);
Gavin Shandff4a392014-07-15 17:00:55 +10001789
Alexey Kardashevskiy5c89a872015-06-18 11:41:36 +10001790 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
Alexey Kardashevskiydb08e1d2017-02-21 13:41:31 +11001791 pnv_ioda_setup_bus_dma(pe, dev->subordinate,
1792 add_to_group);
Benjamin Herrenschmidt74251fe2013-07-01 17:54:09 +10001793 }
1794}
1795
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10001796static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
1797 bool real_mode)
1798{
1799 return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
1800 (phb->regs + 0x210);
1801}
1802
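/*
 * p7ioc-style TCE cache invalidation: write the physical addresses of the
 * affected TCE entries (bit 63 set, two entries per write) to the PHB's
 * invalidate register, using the real-mode mapping when required.
 */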
Benjamin Herrenschmidta34ab7c2016-07-08 16:37:12 +10001803static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001804 unsigned long index, unsigned long npages, bool rm)
Gavin Shan4cce9552013-04-25 19:21:00 +00001805{
Alexey Kardashevskiy0eaf4de2015-06-05 16:35:09 +10001806 struct iommu_table_group_link *tgl = list_first_entry_or_null(
1807 &tbl->it_group_list, struct iommu_table_group_link,
1808 next);
1809 struct pnv_ioda_pe *pe = container_of(tgl->table_group,
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10001810 struct pnv_ioda_pe, table_group);
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10001811 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
Gavin Shan4cce9552013-04-25 19:21:00 +00001812 unsigned long start, end, inc;
1813
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001814 start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
1815 end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
1816 npages - 1);
Gavin Shan4cce9552013-04-25 19:21:00 +00001817
Benjamin Herrenschmidt08acce12016-07-08 16:37:13 +10001818 /* p7ioc-style invalidation, 2 TCEs per write */
1819 start |= (1ull << 63);
1820 end |= (1ull << 63);
1821 inc = 16;
Gavin Shan4cce9552013-04-25 19:21:00 +00001822 end |= inc - 1; /* round up end to be different than start */
1823
1824 mb(); /* Ensure above stores are visible */
1825 while (start <= end) {
Alexey Kardashevskiy8e0a1612013-08-28 18:37:43 +10001826 if (rm)
Benjamin Herrenschmidt3ad26e52013-10-11 18:23:53 +11001827 __raw_rm_writeq(cpu_to_be64(start), invalidate);
Alexey Kardashevskiy8e0a1612013-08-28 18:37:43 +10001828 else
Benjamin Herrenschmidt3ad26e52013-10-11 18:23:53 +11001829 __raw_writeq(cpu_to_be64(start), invalidate);
Gavin Shan4cce9552013-04-25 19:21:00 +00001830 start += inc;
1831 }
1832
1833 /*
1834 * The iommu layer will do another mb() for us on build()
1835	 * and we don't need one on free()
1836 */
1837}
1838
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001839static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
1840 long npages, unsigned long uaddr,
1841 enum dma_data_direction direction,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07001842 unsigned long attrs)
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001843{
1844 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
1845 attrs);
1846
Benjamin Herrenschmidt08acce12016-07-08 16:37:13 +10001847 if (!ret)
Benjamin Herrenschmidta34ab7c2016-07-08 16:37:12 +10001848 pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001849
1850 return ret;
1851}
1852
Alexey Kardashevskiy05c6cfb2015-06-05 16:35:15 +10001853#ifdef CONFIG_IOMMU_API
1854static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
1855 unsigned long *hpa, enum dma_data_direction *direction)
1856{
1857 long ret = pnv_tce_xchg(tbl, index, hpa, direction);
1858
Benjamin Herrenschmidt08acce12016-07-08 16:37:13 +10001859 if (!ret)
Benjamin Herrenschmidta34ab7c2016-07-08 16:37:12 +10001860 pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false);
Alexey Kardashevskiy05c6cfb2015-06-05 16:35:15 +10001861
1862 return ret;
1863}
1864#endif
1865
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001866static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
1867 long npages)
1868{
1869 pnv_tce_free(tbl, index, npages);
1870
Benjamin Herrenschmidt08acce12016-07-08 16:37:13 +10001871 pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001872}
1873
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10001874static struct iommu_table_ops pnv_ioda1_iommu_ops = {
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001875 .set = pnv_ioda1_tce_build,
Alexey Kardashevskiy05c6cfb2015-06-05 16:35:15 +10001876#ifdef CONFIG_IOMMU_API
1877 .exchange = pnv_ioda1_tce_xchg,
1878#endif
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001879 .clear = pnv_ioda1_tce_free,
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10001880 .get = pnv_tce_get,
1881};
1882
Benjamin Herrenschmidta34ab7c2016-07-08 16:37:12 +10001883#define PHB3_TCE_KILL_INVAL_ALL PPC_BIT(0)
1884#define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1)
1885#define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2)
Alexey Kardashevskiybef92532016-04-29 18:55:17 +10001886
Benjamin Herrenschmidta34ab7c2016-07-08 16:37:12 +10001887void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
Alexey Kardashevskiy0bbcdb42016-04-29 18:55:18 +10001888{
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10001889 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
Benjamin Herrenschmidta34ab7c2016-07-08 16:37:12 +10001890 const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;
Alexey Kardashevskiy0bbcdb42016-04-29 18:55:18 +10001891
1892 mb(); /* Ensure previous TCE table stores are visible */
1893 if (rm)
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10001894 __raw_rm_writeq(cpu_to_be64(val), invalidate);
Alexey Kardashevskiy0bbcdb42016-04-29 18:55:18 +10001895 else
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10001896 __raw_writeq(cpu_to_be64(val), invalidate);
Alexey Kardashevskiy0bbcdb42016-04-29 18:55:18 +10001897}
1898
Benjamin Herrenschmidta34ab7c2016-07-08 16:37:12 +10001899static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
Alexey Kardashevskiy5780fb02015-06-05 16:35:12 +10001900{
1901 /* 01xb - invalidate TCEs that match the specified PE# */
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10001902 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
Benjamin Herrenschmidta34ab7c2016-07-08 16:37:12 +10001903 unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
Alexey Kardashevskiy5780fb02015-06-05 16:35:12 +10001904
1905 mb(); /* Ensure above stores are visible */
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10001906 __raw_writeq(cpu_to_be64(val), invalidate);
Alexey Kardashevskiy5780fb02015-06-05 16:35:12 +10001907}
1908
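/*
 * PHB3-style invalidation of a TCE range: build a "kill one" command that
 * carries the PE number and step through the DMA address range in
 * IOMMU-page-sized increments.
 */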
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10001909static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
1910 unsigned shift, unsigned long index,
1911 unsigned long npages)
Gavin Shan4cce9552013-04-25 19:21:00 +00001912{
Alexey Kardashevskiy4d902192016-08-03 18:40:45 +10001913 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
Gavin Shan4cce9552013-04-25 19:21:00 +00001914 unsigned long start, end, inc;
Gavin Shan4cce9552013-04-25 19:21:00 +00001915
1916	/* We'll invalidate DMA addresses in PE scope */
Benjamin Herrenschmidta34ab7c2016-07-08 16:37:12 +10001917 start = PHB3_TCE_KILL_INVAL_ONE;
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10001918 start |= (pe->pe_number & 0xFF);
Gavin Shan4cce9552013-04-25 19:21:00 +00001919 end = start;
1920
1921 /* Figure out the start, end and step */
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001922 start |= (index << shift);
1923 end |= ((index + npages - 1) << shift);
Alexey Kardashevskiyb0376c92014-06-06 18:44:01 +10001924 inc = (0x1ull << shift);
Gavin Shan4cce9552013-04-25 19:21:00 +00001925 mb();
1926
1927 while (start <= end) {
Alexey Kardashevskiy8e0a1612013-08-28 18:37:43 +10001928 if (rm)
Benjamin Herrenschmidt3ad26e52013-10-11 18:23:53 +11001929 __raw_rm_writeq(cpu_to_be64(start), invalidate);
Alexey Kardashevskiy8e0a1612013-08-28 18:37:43 +10001930 else
Benjamin Herrenschmidt3ad26e52013-10-11 18:23:53 +11001931 __raw_writeq(cpu_to_be64(start), invalidate);
Gavin Shan4cce9552013-04-25 19:21:00 +00001932 start += inc;
1933 }
1934}
1935
Benjamin Herrenschmidtf0228c42016-07-08 16:37:15 +10001936static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
1937{
1938 struct pnv_phb *phb = pe->phb;
1939
1940 if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
1941 pnv_pci_phb3_tce_invalidate_pe(pe);
1942 else
1943 opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
1944 pe->pe_number, 0, 0, 0);
1945}
1946
Alexey Kardashevskiye57080f2015-06-05 16:35:13 +10001947static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
1948 unsigned long index, unsigned long npages, bool rm)
1949{
1950 struct iommu_table_group_link *tgl;
1951
1952 list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
1953 struct pnv_ioda_pe *pe = container_of(tgl->table_group,
1954 struct pnv_ioda_pe, table_group);
Benjamin Herrenschmidtf0228c42016-07-08 16:37:15 +10001955 struct pnv_phb *phb = pe->phb;
1956 unsigned int shift = tbl->it_page_shift;
1957
Alistair Popple616badd2017-01-10 15:41:44 +11001958 /*
1959 * NVLink1 can use the TCE kill register directly as
1960 * it's the same as PHB3. NVLink2 is different and
1961 * should go via the OPAL call.
1962 */
1963 if (phb->model == PNV_PHB_MODEL_NPU) {
Alexey Kardashevskiy0bbcdb42016-04-29 18:55:18 +10001964 /*
1965 * The NVLink hardware does not support TCE kill
1966 * per TCE entry so we have to invalidate
1967 * the entire cache for it.
1968 */
Benjamin Herrenschmidtf0228c42016-07-08 16:37:15 +10001969 pnv_pci_phb3_tce_invalidate_entire(phb, rm);
Alexey Kardashevskiy85674862016-04-29 18:55:23 +10001970 continue;
1971 }
Benjamin Herrenschmidtf0228c42016-07-08 16:37:15 +10001972 if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
1973 pnv_pci_phb3_tce_invalidate(pe, rm, shift,
1974 index, npages);
Benjamin Herrenschmidtf0228c42016-07-08 16:37:15 +10001975 else
1976 opal_pci_tce_kill(phb->opal_id,
1977 OPAL_PCI_TCE_KILL_PAGES,
1978 pe->pe_number, 1u << shift,
1979 index << shift, npages);
Alexey Kardashevskiye57080f2015-06-05 16:35:13 +10001980 }
1981}
1982
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001983static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
1984 long npages, unsigned long uaddr,
1985 enum dma_data_direction direction,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07001986 unsigned long attrs)
Gavin Shan4cce9552013-04-25 19:21:00 +00001987{
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001988 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
1989 attrs);
Gavin Shan4cce9552013-04-25 19:21:00 +00001990
Benjamin Herrenschmidt08acce12016-07-08 16:37:13 +10001991 if (!ret)
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10001992 pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
1993
1994 return ret;
1995}
1996
Alexey Kardashevskiy05c6cfb2015-06-05 16:35:15 +10001997#ifdef CONFIG_IOMMU_API
1998static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
1999 unsigned long *hpa, enum dma_data_direction *direction)
2000{
2001 long ret = pnv_tce_xchg(tbl, index, hpa, direction);
2002
Benjamin Herrenschmidt08acce12016-07-08 16:37:13 +10002003 if (!ret)
Alexey Kardashevskiy05c6cfb2015-06-05 16:35:15 +10002004 pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);
2005
2006 return ret;
2007}
2008#endif
2009
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10002010static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
2011 long npages)
2012{
2013 pnv_tce_free(tbl, index, npages);
2014
Benjamin Herrenschmidt08acce12016-07-08 16:37:13 +10002015 pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
Gavin Shan4cce9552013-04-25 19:21:00 +00002016}
2017
Alexey Kardashevskiy4793d652015-06-05 16:35:20 +10002018static void pnv_ioda2_table_free(struct iommu_table *tbl)
2019{
2020 pnv_pci_ioda2_table_free_pages(tbl);
2021 iommu_free_table(tbl, "pnv");
2022}
2023
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10002024static struct iommu_table_ops pnv_ioda2_iommu_ops = {
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10002025 .set = pnv_ioda2_tce_build,
Alexey Kardashevskiy05c6cfb2015-06-05 16:35:15 +10002026#ifdef CONFIG_IOMMU_API
2027 .exchange = pnv_ioda2_tce_xchg,
2028#endif
Alexey Kardashevskiydecbda22015-06-05 16:35:07 +10002029 .clear = pnv_ioda2_tce_free,
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10002030 .get = pnv_tce_get,
Alexey Kardashevskiy4793d652015-06-05 16:35:20 +10002031 .free = pnv_ioda2_table_free,
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10002032};
2033
Gavin Shan801846d2016-05-03 15:41:34 +10002034static int pnv_pci_ioda_dev_dma_weight(struct pci_dev *dev, void *data)
2035{
2036 unsigned int *weight = (unsigned int *)data;
2037
2038 /* This is quite simplistic. The "base" weight of a device
2039	 * is 10. A weight of 0 means no DMA is accounted for the device.
2040 */
2041 if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
2042 return 0;
2043
2044 if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
2045 dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
2046 dev->class == PCI_CLASS_SERIAL_USB_EHCI)
2047 *weight += 3;
2048 else if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
2049 *weight += 15;
2050 else
2051 *weight += 10;
2052
2053 return 0;
2054}
2055
2056static unsigned int pnv_pci_ioda_pe_dma_weight(struct pnv_ioda_pe *pe)
2057{
2058 unsigned int weight = 0;
2059
2060 /* SRIOV VF has same DMA32 weight as its PF */
2061#ifdef CONFIG_PCI_IOV
2062 if ((pe->flags & PNV_IODA_PE_VF) && pe->parent_dev) {
2063 pnv_pci_ioda_dev_dma_weight(pe->parent_dev, &weight);
2064 return weight;
2065 }
2066#endif
2067
2068 if ((pe->flags & PNV_IODA_PE_DEV) && pe->pdev) {
2069 pnv_pci_ioda_dev_dma_weight(pe->pdev, &weight);
2070 } else if ((pe->flags & PNV_IODA_PE_BUS) && pe->pbus) {
2071 struct pci_dev *pdev;
2072
2073 list_for_each_entry(pdev, &pe->pbus->devices, bus_list)
2074 pnv_pci_ioda_dev_dma_weight(pdev, &weight);
2075 } else if ((pe->flags & PNV_IODA_PE_BUS_ALL) && pe->pbus) {
2076 pci_walk_bus(pe->pbus, pnv_pci_ioda_dev_dma_weight, &weight);
2077 }
2078
2079 return weight;
2080}
2081
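/*
 * IODA1 DMA setup: allocate a share of the PHB's DMA32 segments based on
 * the PE's DMA weight, back them with one contiguous TCE table, map each
 * segment through OPAL and attach the PE's devices to the table.
 */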
Gavin Shanb30d9362016-05-03 15:41:32 +10002082static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
Gavin Shan2b923ed2016-05-05 12:04:16 +10002083 struct pnv_ioda_pe *pe)
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002084{
2085
2086 struct page *tce_mem = NULL;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002087 struct iommu_table *tbl;
Gavin Shan2b923ed2016-05-05 12:04:16 +10002088 unsigned int weight, total_weight = 0;
2089 unsigned int tce32_segsz, base, segs, avail, i;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002090 int64_t rc;
2091 void *addr;
2092
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002093 /* XXX FIXME: Handle 64-bit only DMA devices */
2094 /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
2095 /* XXX FIXME: Allocate multi-level tables on PHB3 */
Gavin Shan2b923ed2016-05-05 12:04:16 +10002096 weight = pnv_pci_ioda_pe_dma_weight(pe);
2097 if (!weight)
2098 return;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002099
Gavin Shan2b923ed2016-05-05 12:04:16 +10002100 pci_walk_bus(phb->hose->bus, pnv_pci_ioda_dev_dma_weight,
2101 &total_weight);
2102 segs = (weight * phb->ioda.dma32_count) / total_weight;
2103 if (!segs)
2104 segs = 1;
2105
2106 /*
2107 * Allocate contiguous DMA32 segments. We begin with the expected
2108	 * number of segments. On each retry, the number of DMA32
2109	 * segments to allocate is decreased by one, until a single
2110	 * segment can be allocated.
2111 */
2112 do {
2113 for (base = 0; base <= phb->ioda.dma32_count - segs; base++) {
2114 for (avail = 0, i = base; i < base + segs; i++) {
2115 if (phb->ioda.dma32_segmap[i] ==
2116 IODA_INVALID_PE)
2117 avail++;
2118 }
2119
2120 if (avail == segs)
2121 goto found;
2122 }
2123 } while (--segs);
2124
2125 if (!segs) {
2126 pe_warn(pe, "No available DMA32 segments\n");
2127 return;
2128 }
2129
2130found:
Alexey Kardashevskiy0eaf4de2015-06-05 16:35:09 +10002131 tbl = pnv_pci_table_alloc(phb->hose->node);
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10002132 iommu_register_group(&pe->table_group, phb->hose->global_number,
2133 pe->pe_number);
Alexey Kardashevskiy0eaf4de2015-06-05 16:35:09 +10002134 pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);
Alexey Kardashevskiyc5773822015-06-05 16:34:55 +10002135
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002136 /* Grab a 32-bit TCE table */
Gavin Shan2b923ed2016-05-05 12:04:16 +10002137 pe_info(pe, "DMA weight %d (%d), assigned (%d) %d DMA32 segments\n",
2138 weight, total_weight, base, segs);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002139 pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
Gavin Shanacce9712016-05-03 15:41:33 +10002140 base * PNV_IODA1_DMA32_SEGSIZE,
2141 (base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002142
2143 /* XXX Currently, we allocate one big contiguous table for the
2144 * TCEs. We only really need one chunk per 256M of TCE space
2145	 * (i.e. per segment) but that's an optimization for later; it
2146 * requires some added smarts with our get/put_tce implementation
Gavin Shanacce9712016-05-03 15:41:33 +10002147 *
2148 * Each TCE page is 4KB in size and each TCE entry occupies 8
2149 * bytes
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002150 */
Gavin Shanacce9712016-05-03 15:41:33 +10002151 tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002152 tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
Gavin Shanacce9712016-05-03 15:41:33 +10002153 get_order(tce32_segsz * segs));
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002154 if (!tce_mem) {
2155 pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
2156 goto fail;
2157 }
2158 addr = page_address(tce_mem);
Gavin Shanacce9712016-05-03 15:41:33 +10002159 memset(addr, 0, tce32_segsz * segs);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002160
2161 /* Configure HW */
2162 for (i = 0; i < segs; i++) {
2163 rc = opal_pci_map_pe_dma_window(phb->opal_id,
2164 pe->pe_number,
2165 base + i, 1,
Gavin Shanacce9712016-05-03 15:41:33 +10002166 __pa(addr) + tce32_segsz * i,
2167 tce32_segsz, IOMMU_PAGE_SIZE_4K);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002168 if (rc) {
2169 pe_err(pe, " Failed to configure 32-bit TCE table,"
2170 " err %ld\n", rc);
2171 goto fail;
2172 }
2173 }
2174
Gavin Shan2b923ed2016-05-05 12:04:16 +10002175 /* Setup DMA32 segment mapping */
2176 for (i = base; i < base + segs; i++)
2177 phb->ioda.dma32_segmap[i] = pe->pe_number;
2178
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002179 /* Setup linux iommu table */
Gavin Shanacce9712016-05-03 15:41:33 +10002180 pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
2181 base * PNV_IODA1_DMA32_SEGSIZE,
2182 IOMMU_PAGE_SHIFT_4K);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002183
Alexey Kardashevskiyda004c32015-06-05 16:35:06 +10002184 tbl->it_ops = &pnv_ioda1_iommu_ops;
Alexey Kardashevskiy4793d652015-06-05 16:35:20 +10002185 pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
2186 pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002187 iommu_init_table(tbl, phb->hose->node);
2188
Wei Yang781a8682015-03-25 16:23:57 +08002189 if (pe->flags & PNV_IODA_PE_DEV) {
Alexey Kardashevskiy46170822015-06-05 16:34:54 +10002190 /*
2191		 * Set the table base here only to carry the iommu_group
2192		 * further down and let iommu_add_device() do its job.
2193 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
2194 */
2195 set_iommu_table_base(&pe->pdev->dev, tbl);
2196 iommu_add_device(&pe->pdev->dev);
Alexey Kardashevskiyc5773822015-06-05 16:34:55 +10002197 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
Alexey Kardashevskiydb08e1d2017-02-21 13:41:31 +11002198 pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
Benjamin Herrenschmidt74251fe2013-07-01 17:54:09 +10002199
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002200 return;
2201 fail:
2202 /* XXX Failure: Try to fallback to 64-bit only ? */
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002203 if (tce_mem)
Gavin Shanacce9712016-05-03 15:41:33 +10002204 __free_pages(tce_mem, get_order(tce32_segsz * segs));
Alexey Kardashevskiy0eaf4de2015-06-05 16:35:09 +10002205 if (tbl) {
2206 pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
2207 iommu_free_table(tbl, "pnv");
2208 }
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002209}
2210
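/*
 * Program an iommu_table into one of the PE's TVEs via OPAL and link it
 * to the PE's table group; the TCE cache is invalidated afterwards.
 */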
Alexey Kardashevskiy43cb60a2015-06-05 16:35:18 +10002211static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
2212 int num, struct iommu_table *tbl)
2213{
2214 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2215 table_group);
2216 struct pnv_phb *phb = pe->phb;
2217 int64_t rc;
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002218 const unsigned long size = tbl->it_indirect_levels ?
2219 tbl->it_level_size : tbl->it_size;
Alexey Kardashevskiy43cb60a2015-06-05 16:35:18 +10002220 const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
2221 const __u64 win_size = tbl->it_size << tbl->it_page_shift;
2222
Alexey Kardashevskiy4793d652015-06-05 16:35:20 +10002223 pe_info(pe, "Setting up window#%d %llx..%llx pg=%x\n", num,
Alexey Kardashevskiy43cb60a2015-06-05 16:35:18 +10002224 start_addr, start_addr + win_size - 1,
2225 IOMMU_PAGE_SIZE(tbl));
2226
2227 /*
2228 * Map TCE table through TVT. The TVE index is the PE number
2229	 * shifted left by 1 bit for the 32-bit DMA space.
2230 */
2231 rc = opal_pci_map_pe_dma_window(phb->opal_id,
2232 pe->pe_number,
Alexey Kardashevskiy4793d652015-06-05 16:35:20 +10002233 (pe->pe_number << 1) + num,
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002234 tbl->it_indirect_levels + 1,
Alexey Kardashevskiy43cb60a2015-06-05 16:35:18 +10002235 __pa(tbl->it_base),
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002236 size << 3,
Alexey Kardashevskiy43cb60a2015-06-05 16:35:18 +10002237 IOMMU_PAGE_SIZE(tbl));
2238 if (rc) {
2239 pe_err(pe, "Failed to configure TCE table, err %ld\n", rc);
2240 return rc;
2241 }
2242
2243 pnv_pci_link_table_and_group(phb->hose->node, num,
2244 tbl, &pe->table_group);
Michael Ellermaned7d9a12016-09-15 17:03:06 +10002245 pnv_pci_ioda2_tce_invalidate_pe(pe);
Alexey Kardashevskiy43cb60a2015-06-05 16:35:18 +10002246
2247 return 0;
2248}
2249
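/*
 * Enable or disable the PE's 64-bit bypass window (TVE#1), which
 * direct-maps all of system memory at bus address pe->tce_bypass_base.
 */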
Alexey Kardashevskiyf87a8862015-06-05 16:35:10 +10002250static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11002251{
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11002252 uint16_t window_id = (pe->pe_number << 1 ) + 1;
2253 int64_t rc;
2254
2255 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
2256 if (enable) {
2257 phys_addr_t top = memblock_end_of_DRAM();
2258
2259 top = roundup_pow_of_two(top);
2260 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
2261 pe->pe_number,
2262 window_id,
2263 pe->tce_bypass_base,
2264 top);
2265 } else {
2266 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
2267 pe->pe_number,
2268 window_id,
2269 pe->tce_bypass_base,
2270 0);
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11002271 }
2272 if (rc)
2273 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
2274 else
2275 pe->tce_bypass_enabled = enable;
2276}
2277
Alexey Kardashevskiy4793d652015-06-05 16:35:20 +10002278static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2279 __u32 page_shift, __u64 window_size, __u32 levels,
2280 struct iommu_table *tbl);
2281
2282static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
2283 int num, __u32 page_shift, __u64 window_size, __u32 levels,
2284 struct iommu_table **ptbl)
2285{
2286 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2287 table_group);
2288 int nid = pe->phb->hose->node;
2289 __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
2290 long ret;
2291 struct iommu_table *tbl;
2292
2293 tbl = pnv_pci_table_alloc(nid);
2294 if (!tbl)
2295 return -ENOMEM;
2296
2297 ret = pnv_pci_ioda2_table_alloc_pages(nid,
2298 bus_offset, page_shift, window_size,
2299 levels, tbl);
2300 if (ret) {
2301 iommu_free_table(tbl, "pnv");
2302 return ret;
2303 }
2304
2305 tbl->it_ops = &pnv_ioda2_iommu_ops;
Alexey Kardashevskiy4793d652015-06-05 16:35:20 +10002306
2307 *ptbl = tbl;
2308
2309 return 0;
2310}
2311
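/*
 * Create and program the default 32-bit DMA window, clamped to the
 * installed memory size so memory-constrained (e.g. kdump) kernels do not
 * over-allocate, and enable the 64-bit bypass unless it has been globally
 * disabled.
 */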
Alexey Kardashevskiy46d3e1e2015-06-05 16:35:23 +10002312static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
2313{
2314 struct iommu_table *tbl = NULL;
2315 long rc;
2316
Nishanth Aravamudanbb005452015-09-02 08:39:28 -07002317 /*
Nishanth Aravamudanfa144862015-09-04 11:22:52 -07002318 * crashkernel= specifies the kdump kernel's maximum memory at
2319	 * some offset and there is no guarantee the result is a power
2320 * of 2, which will cause errors later.
2321 */
2322 const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max());
2323
2324 /*
Nishanth Aravamudanbb005452015-09-02 08:39:28 -07002325 * In memory constrained environments, e.g. kdump kernel, the
2326 * DMA window can be larger than available memory, which will
2327 * cause errors later.
2328 */
Nishanth Aravamudanfa144862015-09-04 11:22:52 -07002329 const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory);
Nishanth Aravamudanbb005452015-09-02 08:39:28 -07002330
Alexey Kardashevskiy46d3e1e2015-06-05 16:35:23 +10002331 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
2332 IOMMU_PAGE_SHIFT_4K,
Nishanth Aravamudanbb005452015-09-02 08:39:28 -07002333 window_size,
Alexey Kardashevskiy46d3e1e2015-06-05 16:35:23 +10002334 POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
2335 if (rc) {
2336 pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
2337 rc);
2338 return rc;
2339 }
2340
2341 iommu_init_table(tbl, pe->phb->hose->node);
2342
2343 rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
2344 if (rc) {
2345 pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
2346 rc);
2347 pnv_ioda2_table_free(tbl);
2348 return rc;
2349 }
2350
2351 if (!pnv_iommu_bypass_disabled)
2352 pnv_pci_ioda2_set_bypass(pe, true);
2353
Alexey Kardashevskiy46d3e1e2015-06-05 16:35:23 +10002354 /*
2355	 * Set the table base here only to carry the iommu_group
2356	 * further down and let iommu_add_device() do its job.
2357 * pnv_pci_ioda_dma_dev_setup will override it later anyway.
2358 */
2359 if (pe->flags & PNV_IODA_PE_DEV)
2360 set_iommu_table_base(&pe->pdev->dev, tbl);
2361
2362 return 0;
2363}
2364
Alexey Kardashevskiyb5926432015-06-15 17:49:59 +10002365#if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV)
2366static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
2367 int num)
2368{
2369 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2370 table_group);
2371 struct pnv_phb *phb = pe->phb;
2372 long ret;
2373
2374 pe_info(pe, "Removing DMA window #%d\n", num);
2375
2376 ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
2377 (pe->pe_number << 1) + num,
2378 0/* levels */, 0/* table address */,
2379 0/* table size */, 0/* page size */);
2380 if (ret)
2381 pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
2382 else
Michael Ellermaned7d9a12016-09-15 17:03:06 +10002383 pnv_pci_ioda2_tce_invalidate_pe(pe);
Alexey Kardashevskiyb5926432015-06-15 17:49:59 +10002384
2385 pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
2386
2387 return ret;
2388}
2389#endif
2390
Alexey Kardashevskiyf87a8862015-06-05 16:35:10 +10002391#ifdef CONFIG_IOMMU_API
Alexey Kardashevskiy00547192015-06-05 16:35:22 +10002392static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
2393 __u64 window_size, __u32 levels)
2394{
2395 unsigned long bytes = 0;
2396 const unsigned window_shift = ilog2(window_size);
2397 unsigned entries_shift = window_shift - page_shift;
2398 unsigned table_shift = entries_shift + 3;
2399 unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
2400 unsigned long direct_table_size;
2401
2402 if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
2403 (window_size > memory_hotplug_max()) ||
2404 !is_power_of_2(window_size))
2405 return 0;
2406
2407 /* Calculate a direct table size from window_size and levels */
2408 entries_shift = (entries_shift + levels - 1) / levels;
2409 table_shift = entries_shift + 3;
2410 table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
2411 direct_table_size = 1UL << table_shift;
2412
2413 for ( ; levels; --levels) {
2414 bytes += _ALIGN_UP(tce_table_size, direct_table_size);
2415
2416 tce_table_size /= direct_table_size;
2417 tce_table_size <<= 3;
2418 tce_table_size = _ALIGN_UP(tce_table_size, direct_table_size);
2419 }
2420
2421 return bytes;
2422}
2423
Alexey Kardashevskiyf87a8862015-06-05 16:35:10 +10002424static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11002425{
Alexey Kardashevskiyf87a8862015-06-05 16:35:10 +10002426 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2427 table_group);
Alexey Kardashevskiy46d3e1e2015-06-05 16:35:23 +10002428 /* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
2429 struct iommu_table *tbl = pe->table_group.tables[0];
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11002430
Alexey Kardashevskiyf87a8862015-06-05 16:35:10 +10002431 pnv_pci_ioda2_set_bypass(pe, false);
Alexey Kardashevskiy46d3e1e2015-06-05 16:35:23 +10002432 pnv_pci_ioda2_unset_window(&pe->table_group, 0);
Alexey Kardashevskiydb08e1d2017-02-21 13:41:31 +11002433 if (pe->pbus)
2434 pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
Alexey Kardashevskiy46d3e1e2015-06-05 16:35:23 +10002435 pnv_ioda2_table_free(tbl);
Benjamin Herrenschmidtcd15b042014-02-11 11:32:38 +11002436}
2437
Alexey Kardashevskiyf87a8862015-06-05 16:35:10 +10002438static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
2439{
2440 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
2441 table_group);
2442
Alexey Kardashevskiy46d3e1e2015-06-05 16:35:23 +10002443 pnv_pci_ioda2_setup_default_config(pe);
Alexey Kardashevskiydb08e1d2017-02-21 13:41:31 +11002444 if (pe->pbus)
2445 pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
Alexey Kardashevskiyf87a8862015-06-05 16:35:10 +10002446}
2447
2448static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
Alexey Kardashevskiy00547192015-06-05 16:35:22 +10002449 .get_table_size = pnv_pci_ioda2_get_table_size,
Alexey Kardashevskiy4793d652015-06-05 16:35:20 +10002450 .create_table = pnv_pci_ioda2_create_table,
2451 .set_window = pnv_pci_ioda2_set_window,
2452 .unset_window = pnv_pci_ioda2_unset_window,
Alexey Kardashevskiyf87a8862015-06-05 16:35:10 +10002453 .take_ownership = pnv_ioda2_take_ownership,
2454 .release_ownership = pnv_ioda2_release_ownership,
2455};
Alexey Kardashevskiyb5cb9ab2016-04-29 18:55:24 +10002456
2457static int gpe_table_group_to_npe_cb(struct device *dev, void *opaque)
2458{
2459 struct pci_controller *hose;
2460 struct pnv_phb *phb;
2461 struct pnv_ioda_pe **ptmppe = opaque;
2462 struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
2463 struct pci_dn *pdn = pci_get_pdn(pdev);
2464
2465 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
2466 return 0;
2467
2468 hose = pci_bus_to_host(pdev->bus);
2469 phb = hose->private_data;
2470 if (phb->type != PNV_PHB_NPU)
2471 return 0;
2472
2473 *ptmppe = &phb->ioda.pe_array[pdn->pe_number];
2474
2475 return 1;
2476}
2477
2478/*
2479 * This returns the PE of the associated NPU.
2480 * It assumes that the NPU is in the same IOMMU group as the GPU and that
2481 * there are no other PEs in the group.
2482 */
2483static struct pnv_ioda_pe *gpe_table_group_to_npe(
2484 struct iommu_table_group *table_group)
2485{
2486 struct pnv_ioda_pe *npe = NULL;
2487 int ret = iommu_group_for_each_dev(table_group->group, &npe,
2488 gpe_table_group_to_npe_cb);
2489
2490 BUG_ON(!ret || !npe);
2491
2492 return npe;
2493}
2494
2495static long pnv_pci_ioda2_npu_set_window(struct iommu_table_group *table_group,
2496 int num, struct iommu_table *tbl)
2497{
2498 long ret = pnv_pci_ioda2_set_window(table_group, num, tbl);
2499
2500 if (ret)
2501 return ret;
2502
2503 ret = pnv_npu_set_window(gpe_table_group_to_npe(table_group), num, tbl);
2504 if (ret)
2505 pnv_pci_ioda2_unset_window(table_group, num);
2506
2507 return ret;
2508}
2509
2510static long pnv_pci_ioda2_npu_unset_window(
2511 struct iommu_table_group *table_group,
2512 int num)
2513{
2514 long ret = pnv_pci_ioda2_unset_window(table_group, num);
2515
2516 if (ret)
2517 return ret;
2518
2519 return pnv_npu_unset_window(gpe_table_group_to_npe(table_group), num);
2520}
2521
2522static void pnv_ioda2_npu_take_ownership(struct iommu_table_group *table_group)
2523{
2524 /*
2525 * Detach NPU first as pnv_ioda2_take_ownership() will destroy
2526 * the iommu_table if 32bit DMA is enabled.
2527 */
2528 pnv_npu_take_ownership(gpe_table_group_to_npe(table_group));
2529 pnv_ioda2_take_ownership(table_group);
2530}
2531
2532static struct iommu_table_group_ops pnv_pci_ioda2_npu_ops = {
2533 .get_table_size = pnv_pci_ioda2_get_table_size,
2534 .create_table = pnv_pci_ioda2_create_table,
2535 .set_window = pnv_pci_ioda2_npu_set_window,
2536 .unset_window = pnv_pci_ioda2_npu_unset_window,
2537 .take_ownership = pnv_ioda2_npu_take_ownership,
2538 .release_ownership = pnv_ioda2_release_ownership,
2539};
2540
2541static void pnv_pci_ioda_setup_iommu_api(void)
2542{
2543 struct pci_controller *hose, *tmp;
2544 struct pnv_phb *phb;
2545 struct pnv_ioda_pe *pe, *gpe;
2546
2547 /*
2548	 * Now that all PHBs are discovered, it is time to add NPU devices to
2549 * the corresponding IOMMU groups.
2550 */
2551 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
2552 phb = hose->private_data;
2553
2554 if (phb->type != PNV_PHB_NPU)
2555 continue;
2556
2557 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
2558 gpe = pnv_pci_npu_setup_iommu(pe);
2559 if (gpe)
2560 gpe->table_group.ops = &pnv_pci_ioda2_npu_ops;
2561 }
2562 }
2563}
2564#else /* !CONFIG_IOMMU_API */
2565static void pnv_pci_ioda_setup_iommu_api(void) { }
Alexey Kardashevskiyf87a8862015-06-05 16:35:10 +10002566#endif
2567
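/*
 * Recursively allocate a (possibly multi-level) TCE table: each level is
 * a zeroed block of 1 << shift bytes and intermediate entries hold the
 * physical address of the next level with the read/write bits set.
 */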
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002568static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2569 unsigned levels, unsigned long limit,
Alexey Kardashevskiy3ba3a732015-07-20 20:45:51 +10002570 unsigned long *current_offset, unsigned long *total_allocated)
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002571{
2572 struct page *tce_mem = NULL;
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002573 __be64 *addr, *tmp;
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002574 unsigned order = max_t(unsigned, shift, PAGE_SHIFT) - PAGE_SHIFT;
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002575 unsigned long allocated = 1UL << (order + PAGE_SHIFT);
2576 unsigned entries = 1UL << (shift - 3);
2577 long i;
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002578
2579 tce_mem = alloc_pages_node(nid, GFP_KERNEL, order);
2580 if (!tce_mem) {
2581 pr_err("Failed to allocate a TCE memory, order=%d\n", order);
2582 return NULL;
2583 }
2584 addr = page_address(tce_mem);
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002585 memset(addr, 0, allocated);
Alexey Kardashevskiy3ba3a732015-07-20 20:45:51 +10002586 *total_allocated += allocated;
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002587
2588 --levels;
2589 if (!levels) {
2590 *current_offset += allocated;
2591 return addr;
2592 }
2593
2594 for (i = 0; i < entries; ++i) {
2595 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
Alexey Kardashevskiy3ba3a732015-07-20 20:45:51 +10002596 levels, limit, current_offset, total_allocated);
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002597 if (!tmp)
2598 break;
2599
2600 addr[i] = cpu_to_be64(__pa(tmp) |
2601 TCE_PCI_READ | TCE_PCI_WRITE);
2602
2603 if (*current_offset >= limit)
2604 break;
2605 }
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002606
2607 return addr;
2608}
2609
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002610static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
2611 unsigned long size, unsigned level);
2612
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002613static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002614 __u32 page_shift, __u64 window_size, __u32 levels,
2615 struct iommu_table *tbl)
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002616{
2617 void *addr;
Alexey Kardashevskiy3ba3a732015-07-20 20:45:51 +10002618 unsigned long offset = 0, level_shift, total_allocated = 0;
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002619 const unsigned window_shift = ilog2(window_size);
2620 unsigned entries_shift = window_shift - page_shift;
2621 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
2622 const unsigned long tce_table_size = 1UL << table_shift;
2623
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002624 if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
2625 return -EINVAL;
2626
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002627 if ((window_size > memory_hotplug_max()) || !is_power_of_2(window_size))
2628 return -EINVAL;
2629
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002630 /* Adjust direct table size from window_size and levels */
2631 entries_shift = (entries_shift + levels - 1) / levels;
2632 level_shift = entries_shift + 3;
2633 level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
2634
Alexey Kardashevskiy7aafac12017-02-22 15:43:59 +11002635 if ((level_shift - 3) * levels + page_shift >= 60)
2636 return -EINVAL;
2637
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002638 /* Allocate TCE table */
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002639 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
Alexey Kardashevskiy3ba3a732015-07-20 20:45:51 +10002640 levels, tce_table_size, &offset, &total_allocated);
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002641
2642 /* addr==NULL means that the first level allocation failed */
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002643 if (!addr)
2644 return -ENOMEM;
2645
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002646 /*
2647	 * The first level was allocated but some lower level failed, so
2648	 * we did not allocate as much as we wanted;
2649	 * release the partially allocated table.
2650 */
2651 if (offset < tce_table_size) {
2652 pnv_pci_ioda2_table_do_free_pages(addr,
2653 1ULL << (level_shift - 3), levels - 1);
2654 return -ENOMEM;
2655 }
2656
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002657 /* Setup linux iommu table */
2658 pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
2659 page_shift);
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002660 tbl->it_level_size = 1ULL << (level_shift - 3);
2661 tbl->it_indirect_levels = levels - 1;
Alexey Kardashevskiy3ba3a732015-07-20 20:45:51 +10002662 tbl->it_allocated_size = total_allocated;
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002663
2664 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
2665 window_size, tce_table_size, bus_offset);
2666
2667 return 0;
2668}
2669
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002670static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
2671 unsigned long size, unsigned level)
2672{
2673 const unsigned long addr_ul = (unsigned long) addr &
2674 ~(TCE_PCI_READ | TCE_PCI_WRITE);
2675
2676 if (level) {
2677 long i;
2678 u64 *tmp = (u64 *) addr_ul;
2679
2680 for (i = 0; i < size; ++i) {
2681 unsigned long hpa = be64_to_cpu(tmp[i]);
2682
2683 if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
2684 continue;
2685
2686 pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
2687 level - 1);
2688 }
2689 }
2690
2691 free_pages(addr_ul, get_order(size << 3));
2692}
2693
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002694static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
2695{
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002696 const unsigned long size = tbl->it_indirect_levels ?
2697 tbl->it_level_size : tbl->it_size;
2698
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002699 if (!tbl->it_size)
2700 return;
2701
Alexey Kardashevskiybbb845c2015-06-05 16:35:19 +10002702 pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
2703 tbl->it_indirect_levels);
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002704}
2705
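/*
 * IODA2 DMA setup: reserve TVE#1 for the 64-bit bypass, advertise the
 * table group capabilities (window sizes, page sizes, levels) and install
 * the default 32-bit window for the PE.
 */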
Gavin Shan373f5652013-04-25 19:21:01 +00002706static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
2707 struct pnv_ioda_pe *pe)
2708{
Gavin Shan373f5652013-04-25 19:21:01 +00002709 int64_t rc;
2710
Gavin Shanccd1c192016-05-20 16:41:31 +10002711 if (!pnv_pci_ioda_pe_dma_weight(pe))
2712 return;
2713
Alexey Kardashevskiyf87a8862015-06-05 16:35:10 +10002714 /* TVE #1 is selected by PCI address bit 59 */
2715 pe->tce_bypass_base = 1ull << 59;
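	/*
	 * Illustrative note (an assumption, not from the original source):
	 * with TVE#1 selected by bit 59, a 64-bit capable device can be
	 * handed dma_addr = tce_bypass_base + host physical address and
	 * bypass TCE translation, while TVE#0 keeps serving the translated
	 * 32-bit window set up below.
	 */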
2716
Alexey Kardashevskiyb348aa62015-06-05 16:35:08 +10002717 iommu_register_group(&pe->table_group, phb->hose->global_number,
2718 pe->pe_number);
Alexey Kardashevskiyc5773822015-06-05 16:34:55 +10002719
Gavin Shan373f5652013-04-25 19:21:01 +00002720	/* The PE will reserve all possible 32-bit space */
Gavin Shan373f5652013-04-25 19:21:01 +00002721 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
Alexey Kardashevskiyaca69132015-06-05 16:35:17 +10002722 phb->ioda.m32_pci_base);
Gavin Shan373f5652013-04-25 19:21:01 +00002723
Alexey Kardashevskiye5aad1e2015-06-05 16:35:16 +10002724 /* Setup linux iommu table */
Alexey Kardashevskiy4793d652015-06-05 16:35:20 +10002725 pe->table_group.tce32_start = 0;
2726 pe->table_group.tce32_size = phb->ioda.m32_pci_base;
2727 pe->table_group.max_dynamic_windows_supported =
2728 IOMMU_TABLE_GROUP_MAX_TABLES;
2729 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
2730 pe->table_group.pgsizes = SZ_4K | SZ_64K | SZ_16M;
Alexey Kardashevskiye5aad1e2015-06-05 16:35:16 +10002731#ifdef CONFIG_IOMMU_API
2732 pe->table_group.ops = &pnv_pci_ioda2_ops;
2733#endif
2734
Alexey Kardashevskiy46d3e1e2015-06-05 16:35:23 +10002735 rc = pnv_pci_ioda2_setup_default_config(pe);
Gavin Shan801846d2016-05-03 15:41:34 +10002736 if (rc)
Alexey Kardashevskiy46d3e1e2015-06-05 16:35:23 +10002737 return;
Gavin Shan373f5652013-04-25 19:21:01 +00002738
Alexey Kardashevskiy20f13b92017-02-21 13:40:20 +11002739 if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
Alexey Kardashevskiydb08e1d2017-02-21 13:41:31 +11002740 pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
Gavin Shan373f5652013-04-25 19:21:01 +00002741}
2742
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002743#ifdef CONFIG_PCI_MSI
Suresh Warrier4ee11c12016-08-19 15:35:49 +10002744int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
Gavin Shan137436c2013-04-25 19:20:59 +00002745{
Gavin Shan137436c2013-04-25 19:20:59 +00002746 struct pnv_phb *phb = container_of(chip, struct pnv_phb,
2747 ioda.irq_chip);
Gavin Shan137436c2013-04-25 19:20:59 +00002748
Suresh Warrier4ee11c12016-08-19 15:35:49 +10002749 return opal_pci_msi_eoi(phb->opal_id, hw_irq);
2750}
2751
2752static void pnv_ioda2_msi_eoi(struct irq_data *d)
2753{
2754 int64_t rc;
2755 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
2756 struct irq_chip *chip = irq_data_get_irq_chip(d);
2757
2758 rc = pnv_opal_pci_msi_eoi(chip, hw_irq);
Gavin Shan137436c2013-04-25 19:20:59 +00002759 WARN_ON_ONCE(rc);
2760
2761 icp_native_eoi(d);
2762}
2763
Ian Munsiefd9a1c22014-10-08 19:54:55 +11002764
Ian Munsief4568342016-07-14 07:17:00 +10002765void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
Ian Munsiefd9a1c22014-10-08 19:54:55 +11002766{
2767 struct irq_data *idata;
2768 struct irq_chip *ichip;
2769
Benjamin Herrenschmidtfb111332016-07-08 16:37:09 +10002770 /* The MSI EOI OPAL call is only needed on PHB3 */
2771 if (phb->model != PNV_PHB_MODEL_PHB3)
Ian Munsiefd9a1c22014-10-08 19:54:55 +11002772 return;
2773
2774 if (!phb->ioda.irq_chip_init) {
2775 /*
 2776		 * The first time we set up an MSI IRQ, we need to set up the
 2777		 * corresponding IRQ chip so it routes correctly.
2778 */
2779 idata = irq_get_irq_data(virq);
2780 ichip = irq_data_get_irq_chip(idata);
2781 phb->ioda.irq_chip_init = 1;
2782 phb->ioda.irq_chip = *ichip;
2783 phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
2784 }
2785 irq_set_chip(virq, &phb->ioda.irq_chip);
2786}
2787
Suresh Warrier4ee11c12016-08-19 15:35:49 +10002788/*
2789 * Returns true iff chip is something that we could call
2790 * pnv_opal_pci_msi_eoi for.
2791 */
2792bool is_pnv_opal_msi(struct irq_chip *chip)
2793{
2794 return chip->irq_eoi == pnv_ioda2_msi_eoi;
2795}
2796EXPORT_SYMBOL_GPL(is_pnv_opal_msi);
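/*
 * Hedged usage sketch (not part of the original file): a caller holding a
 * hardware IRQ number and its irq_chip, wanting to EOI through OPAL only
 * when the interrupt is routed via a PHB3 MSI chip, could pair the two
 * helpers above roughly like this:
 *
 *	struct irq_chip *chip = irq_data_get_irq_chip(irq_get_irq_data(virq));
 *
 *	if (chip && is_pnv_opal_msi(chip))
 *		WARN_ON_ONCE(pnv_opal_pci_msi_eoi(chip, hwirq));
 *
 * The variable names here are illustrative; the pairing mirrors
 * pnv_ioda2_msi_eoi() above.
 */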
2797
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002798static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
Gavin Shan137436c2013-04-25 19:20:59 +00002799 unsigned int hwirq, unsigned int virq,
2800 unsigned int is_64, struct msi_msg *msg)
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002801{
2802 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
2803 unsigned int xive_num = hwirq - phb->msi_base;
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002804 __be32 data;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002805 int rc;
2806
2807 /* No PE assigned ? bail out ... no MSI for you ! */
2808 if (pe == NULL)
2809 return -ENXIO;
2810
2811 /* Check if we have an MVE */
2812 if (pe->mve_number < 0)
2813 return -ENXIO;
2814
Benjamin Herrenschmidtb72c1f62013-05-21 22:58:21 +00002815 /* Force 32-bit MSI on some broken devices */
Benjamin Herrenschmidt36074382014-10-07 16:12:36 +11002816 if (dev->no_64bit_msi)
Benjamin Herrenschmidtb72c1f62013-05-21 22:58:21 +00002817 is_64 = 0;
2818
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002819 /* Assign XIVE to PE */
2820 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
2821 if (rc) {
2822 pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
2823 pci_name(dev), rc, xive_num);
2824 return -EIO;
2825 }
2826
2827 if (is_64) {
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002828 __be64 addr64;
2829
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002830 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
2831 &addr64, &data);
2832 if (rc) {
2833 pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
2834 pci_name(dev), rc);
2835 return -EIO;
2836 }
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002837 msg->address_hi = be64_to_cpu(addr64) >> 32;
2838 msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002839 } else {
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002840 __be32 addr32;
2841
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002842 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
2843 &addr32, &data);
2844 if (rc) {
2845 pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
2846 pci_name(dev), rc);
2847 return -EIO;
2848 }
2849 msg->address_hi = 0;
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002850 msg->address_lo = be32_to_cpu(addr32);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002851 }
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10002852 msg->data = be32_to_cpu(data);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002853
Ian Munsief4568342016-07-14 07:17:00 +10002854 pnv_set_msi_irq_chip(phb, virq);
Gavin Shan137436c2013-04-25 19:20:59 +00002855
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002856 pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
Russell Currey1f52f172016-11-16 14:02:15 +11002857 " address=%x_%08x data=%x PE# %x\n",
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002858 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
2859 msg->address_hi, msg->address_lo, data, pe->pe_number);
2860
2861 return 0;
2862}
2863
2864static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
2865{
Gavin Shanfb1b55d2013-03-05 21:12:37 +00002866 unsigned int count;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002867 const __be32 *prop = of_get_property(phb->hose->dn,
2868 "ibm,opal-msi-ranges", NULL);
2869 if (!prop) {
2870 /* BML Fallback */
2871 prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
2872 }
2873 if (!prop)
2874 return;
2875
2876 phb->msi_base = be32_to_cpup(prop);
Gavin Shanfb1b55d2013-03-05 21:12:37 +00002877 count = be32_to_cpup(prop + 1);
2878 if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002879 pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
2880 phb->hose->global_number);
2881 return;
2882 }
Gavin Shanfb1b55d2013-03-05 21:12:37 +00002883
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002884 phb->msi_setup = pnv_pci_ioda_msi_setup;
2885 phb->msi32_support = 1;
2886 pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
Gavin Shanfb1b55d2013-03-05 21:12:37 +00002887 count, phb->msi_base);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00002888}
2889#else
2890static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
2891#endif /* CONFIG_PCI_MSI */
2892
Wei Yang6e628c72015-03-25 16:23:55 +08002893#ifdef CONFIG_PCI_IOV
2894static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
2895{
Wei Yangf2dd0af2015-10-22 09:22:17 +08002896 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
2897 struct pnv_phb *phb = hose->private_data;
2898 const resource_size_t gate = phb->ioda.m64_segsize >> 2;
Wei Yang6e628c72015-03-25 16:23:55 +08002899 struct resource *res;
2900 int i;
Wei Yangdfcc8d42015-10-22 09:22:18 +08002901 resource_size_t size, total_vf_bar_sz;
Wei Yang6e628c72015-03-25 16:23:55 +08002902 struct pci_dn *pdn;
Wei Yang5b88ec22015-03-25 16:23:58 +08002903 int mul, total_vfs;
Wei Yang6e628c72015-03-25 16:23:55 +08002904
2905 if (!pdev->is_physfn || pdev->is_added)
2906 return;
2907
Wei Yang6e628c72015-03-25 16:23:55 +08002908 pdn = pci_get_pdn(pdev);
2909 pdn->vfs_expanded = 0;
Wei Yangee8222f2015-10-22 09:22:16 +08002910 pdn->m64_single_mode = false;
Wei Yang6e628c72015-03-25 16:23:55 +08002911
Wei Yang5b88ec22015-03-25 16:23:58 +08002912 total_vfs = pci_sriov_get_totalvfs(pdev);
Gavin Shan92b8f132016-05-03 15:41:24 +10002913 mul = phb->ioda.total_pe_num;
Wei Yangdfcc8d42015-10-22 09:22:18 +08002914 total_vf_bar_sz = 0;
Wei Yang5b88ec22015-03-25 16:23:58 +08002915
2916 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2917 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2918 if (!res->flags || res->parent)
2919 continue;
Russell Curreyb79331a2016-09-14 16:37:17 +10002920 if (!pnv_pci_is_m64_flags(res->flags)) {
Wei Yangb0331852015-10-22 09:22:14 +08002921 dev_warn(&pdev->dev, "Don't support SR-IOV with"
 2922				 " non-M64 VF BAR%d: %pR\n",
Wei Yang5b88ec22015-03-25 16:23:58 +08002923 i, res);
Wei Yangb0331852015-10-22 09:22:14 +08002924 goto truncate_iov;
Wei Yang5b88ec22015-03-25 16:23:58 +08002925 }
2926
Wei Yangdfcc8d42015-10-22 09:22:18 +08002927 total_vf_bar_sz += pci_iov_resource_size(pdev,
2928 i + PCI_IOV_RESOURCES);
Wei Yang5b88ec22015-03-25 16:23:58 +08002929
Wei Yangf2dd0af2015-10-22 09:22:17 +08002930 /*
 2931		 * If bigger than a quarter of the M64 segment size, just round
 2932		 * up to a power of two.
 2933		 *
 2934		 * Generally, one M64 BAR maps one IOV BAR. To avoid conflicts
 2935		 * with other devices, the IOV BAR size is expanded to
 2936		 * (total_pe * VF_BAR_size). When VF_BAR_size is half of the M64
 2937		 * segment size, the expanded size would equal half of the whole
 2938		 * M64 space size, which would exhaust the M64 space and limit
 2939		 * the system's flexibility. This is a design decision to set
 2940		 * the boundary at a quarter of the M64 segment size.
2941 */
Wei Yangdfcc8d42015-10-22 09:22:18 +08002942 if (total_vf_bar_sz > gate) {
Wei Yang5b88ec22015-03-25 16:23:58 +08002943 mul = roundup_pow_of_two(total_vfs);
Wei Yangdfcc8d42015-10-22 09:22:18 +08002944 dev_info(&pdev->dev,
2945 "VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n",
2946 total_vf_bar_sz, gate, mul);
Wei Yangee8222f2015-10-22 09:22:16 +08002947 pdn->m64_single_mode = true;
Wei Yang5b88ec22015-03-25 16:23:58 +08002948 break;
2949 }
2950 }
2951
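	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): on a PHB3 with a 64GB M64 window and 256 PEs,
	 * m64_segsize is 256MB and the gate above is 64MB. A PF whose VF
	 * BARs total 128MB therefore switches to single mode, with mul
	 * rounded up from total_vfs instead of being total_pe_num.
	 */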
Wei Yang6e628c72015-03-25 16:23:55 +08002952 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2953 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2954 if (!res->flags || res->parent)
2955 continue;
Wei Yang6e628c72015-03-25 16:23:55 +08002956
Wei Yang6e628c72015-03-25 16:23:55 +08002957 size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
Wei Yangee8222f2015-10-22 09:22:16 +08002958 /*
2959 * On PHB3, the minimum size alignment of M64 BAR in single
2960 * mode is 32MB.
2961 */
2962 if (pdn->m64_single_mode && (size < SZ_32M))
2963 goto truncate_iov;
2964 dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
Wei Yang5b88ec22015-03-25 16:23:58 +08002965 res->end = res->start + size * mul - 1;
Wei Yang6e628c72015-03-25 16:23:55 +08002966 dev_dbg(&pdev->dev, " %pR\n", res);
2967 dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
Wei Yang5b88ec22015-03-25 16:23:58 +08002968 i, res, mul);
Wei Yang6e628c72015-03-25 16:23:55 +08002969 }
Wei Yang5b88ec22015-03-25 16:23:58 +08002970 pdn->vfs_expanded = mul;
Wei Yangb0331852015-10-22 09:22:14 +08002971
2972 return;
2973
2974truncate_iov:
2975 /* To save MMIO space, IOV BAR is truncated. */
2976 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2977 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2978 res->flags = 0;
2979 res->end = res->start - 1;
2980 }
Wei Yang6e628c72015-03-25 16:23:55 +08002981}
2982#endif /* CONFIG_PCI_IOV */
2983
Gavin Shan23e79422016-05-03 15:41:27 +10002984static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
2985 struct resource *res)
2986{
2987 struct pnv_phb *phb = pe->phb;
2988 struct pci_bus_region region;
2989 int index;
2990 int64_t rc;
2991
2992 if (!res || !res->flags || res->start > res->end)
2993 return;
2994
2995 if (res->flags & IORESOURCE_IO) {
2996 region.start = res->start - phb->ioda.io_pci_base;
2997 region.end = res->end - phb->ioda.io_pci_base;
2998 index = region.start / phb->ioda.io_segsize;
2999
3000 while (index < phb->ioda.total_pe_num &&
3001 region.start <= region.end) {
3002 phb->ioda.io_segmap[index] = pe->pe_number;
3003 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3004 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
3005 if (rc != OPAL_SUCCESS) {
Russell Currey1f52f172016-11-16 14:02:15 +11003006 pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
Gavin Shan23e79422016-05-03 15:41:27 +10003007 __func__, rc, index, pe->pe_number);
3008 break;
3009 }
3010
3011 region.start += phb->ioda.io_segsize;
3012 index++;
3013 }
3014 } else if ((res->flags & IORESOURCE_MEM) &&
Benjamin Herrenschmidt5958d192016-07-08 15:55:43 +10003015 !pnv_pci_is_m64(phb, res)) {
Gavin Shan23e79422016-05-03 15:41:27 +10003016 region.start = res->start -
3017 phb->hose->mem_offset[0] -
3018 phb->ioda.m32_pci_base;
3019 region.end = res->end -
3020 phb->hose->mem_offset[0] -
3021 phb->ioda.m32_pci_base;
3022 index = region.start / phb->ioda.m32_segsize;
3023
3024 while (index < phb->ioda.total_pe_num &&
3025 region.start <= region.end) {
3026 phb->ioda.m32_segmap[index] = pe->pe_number;
3027 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3028 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
3029 if (rc != OPAL_SUCCESS) {
Russell Currey1f52f172016-11-16 14:02:15 +11003030				pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x\n",
Gavin Shan23e79422016-05-03 15:41:27 +10003031 __func__, rc, index, pe->pe_number);
3032 break;
3033 }
3034
3035 region.start += phb->ioda.m32_segsize;
3036 index++;
3037 }
3038 }
3039}
3040
Gavin Shan11685be2012-08-20 03:49:16 +00003041/*
 3042 * This function is supposed to be called on a per-PE basis, from top
 3043 * to bottom. So the I/O or MMIO segment assigned to a
Masahiro Yamada03671052017-02-27 14:29:28 -08003044 * parent PE could be overridden by its child PEs if necessary.
Gavin Shan11685be2012-08-20 03:49:16 +00003045 */
Gavin Shan23e79422016-05-03 15:41:27 +10003046static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
Gavin Shan11685be2012-08-20 03:49:16 +00003047{
Gavin Shan69d733e2016-05-03 15:41:28 +10003048 struct pci_dev *pdev;
Gavin Shan23e79422016-05-03 15:41:27 +10003049 int i;
Gavin Shan11685be2012-08-20 03:49:16 +00003050
3051 /*
 3052	 * NOTE: We only care about PCI bus based PEs for now. PCI
 3053	 * device based PEs, for example SR-IOV VFs, should
 3054	 * be figured out later.
3055 */
3056 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
3057
Gavin Shan69d733e2016-05-03 15:41:28 +10003058 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
3059 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
3060 pnv_ioda_setup_pe_res(pe, &pdev->resource[i]);
3061
3062 /*
3063 * If the PE contains all subordinate PCI buses, the
3064 * windows of the child bridges should be mapped to
3065 * the PE as well.
3066 */
3067 if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev))
3068 continue;
3069 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
3070 pnv_ioda_setup_pe_res(pe,
3071 &pdev->resource[PCI_BRIDGE_RESOURCES + i]);
3072 }
Gavin Shan11685be2012-08-20 03:49:16 +00003073}
3074
Russell Currey98b665d2016-07-28 15:05:03 +10003075#ifdef CONFIG_DEBUG_FS
3076static int pnv_pci_diag_data_set(void *data, u64 val)
3077{
3078 struct pci_controller *hose;
3079 struct pnv_phb *phb;
3080 s64 ret;
3081
3082 if (val != 1ULL)
3083 return -EINVAL;
3084
3085 hose = (struct pci_controller *)data;
3086 if (!hose || !hose->private_data)
3087 return -ENODEV;
3088
3089 phb = hose->private_data;
3090
3091 /* Retrieve the diag data from firmware */
3092 ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
3093 PNV_PCI_DIAG_BUF_SIZE);
3094 if (ret != OPAL_SUCCESS)
3095 return -EIO;
3096
3097 /* Print the diag data to the kernel log */
3098 pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
3099 return 0;
3100}
3101
3102DEFINE_SIMPLE_ATTRIBUTE(pnv_pci_diag_data_fops, NULL,
3103 pnv_pci_diag_data_set, "%llu\n");
3104
3105#endif /* CONFIG_DEBUG_FS */
3106
Gavin Shan37c367f2013-06-20 18:13:25 +08003107static void pnv_pci_ioda_create_dbgfs(void)
3108{
3109#ifdef CONFIG_DEBUG_FS
3110 struct pci_controller *hose, *tmp;
3111 struct pnv_phb *phb;
3112 char name[16];
3113
3114 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
3115 phb = hose->private_data;
3116
Gavin Shanccd1c192016-05-20 16:41:31 +10003117		/* Note that initialization of the PHB is done */
3118 phb->initialized = 1;
3119
Gavin Shan37c367f2013-06-20 18:13:25 +08003120 sprintf(name, "PCI%04x", hose->global_number);
3121 phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
Russell Currey98b665d2016-07-28 15:05:03 +10003122 if (!phb->dbgfs) {
Gavin Shan37c367f2013-06-20 18:13:25 +08003123 pr_warning("%s: Error on creating debugfs on PHB#%x\n",
3124 __func__, hose->global_number);
Russell Currey98b665d2016-07-28 15:05:03 +10003125 continue;
3126 }
3127
3128 debugfs_create_file("dump_diag_regs", 0200, phb->dbgfs, hose,
3129 &pnv_pci_diag_data_fops);
Gavin Shan37c367f2013-06-20 18:13:25 +08003130 }
3131#endif /* CONFIG_DEBUG_FS */
3132}
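/*
 * Hedged usage note (an illustration, not from the original file): with
 * CONFIG_DEBUG_FS enabled, the file created above can be written from
 * userspace to dump the PHB diagnostic data to the kernel log, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/powerpc/PCI0001/dump_diag_regs
 *
 * "PCI0001" follows the "PCI%04x" naming based on the PHB's global number,
 * and the path assumes debugfs is mounted at /sys/kernel/debug.
 */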
3133
Greg Kroah-Hartmancad5cef2012-12-21 14:04:10 -08003134static void pnv_pci_ioda_fixup(void)
Gavin Shanfb446ad2012-08-20 03:49:14 +00003135{
3136 pnv_pci_ioda_setup_PEs();
Gavin Shanccd1c192016-05-20 16:41:31 +10003137 pnv_pci_ioda_setup_iommu_api();
Gavin Shan37c367f2013-06-20 18:13:25 +08003138 pnv_pci_ioda_create_dbgfs();
3139
Gavin Shane9cc17d2013-06-20 13:21:14 +08003140#ifdef CONFIG_EEH
Gavin Shane9cc17d2013-06-20 13:21:14 +08003141 eeh_init();
Mike Qiudadcd6d2014-06-26 02:58:47 -04003142 eeh_addr_cache_build();
Gavin Shane9cc17d2013-06-20 13:21:14 +08003143#endif
Gavin Shanfb446ad2012-08-20 03:49:14 +00003144}
3145
Gavin Shan271fd032012-09-11 16:59:47 -06003146/*
3147 * Returns the alignment for I/O or memory windows for P2P
3148 * bridges. That actually depends on how PEs are segmented.
3149 * For now, we return I/O or M32 segment size for PE sensitive
3150 * P2P bridges. Otherwise, the default values (4KiB for I/O,
3151 * 1MiB for memory) will be returned.
3152 *
 3153 * The current PCI bus might be put into one PE, which was
 3154 * created against the parent PCI bridge. In that case, we
 3155 * needn't enlarge the alignment, so that we can save some
3156 * resources.
3157 */
3158static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
3159 unsigned long type)
3160{
3161 struct pci_dev *bridge;
3162 struct pci_controller *hose = pci_bus_to_host(bus);
3163 struct pnv_phb *phb = hose->private_data;
3164 int num_pci_bridges = 0;
3165
3166 bridge = bus->self;
3167 while (bridge) {
3168 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
3169 num_pci_bridges++;
3170 if (num_pci_bridges >= 2)
3171 return 1;
3172 }
3173
3174 bridge = bridge->bus->self;
3175 }
3176
Benjamin Herrenschmidt5958d192016-07-08 15:55:43 +10003177 /*
3178 * We fall back to M32 if M64 isn't supported. We enforce the M64
3179 * alignment for any 64-bit resource, PCIe doesn't care and
3180 * bridges only do 64-bit prefetchable anyway.
3181 */
Russell Curreyb79331a2016-09-14 16:37:17 +10003182 if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
Guo Chao262af552014-07-21 14:42:30 +10003183 return phb->ioda.m64_segsize;
Gavin Shan271fd032012-09-11 16:59:47 -06003184 if (type & IORESOURCE_MEM)
3185 return phb->ioda.m32_segsize;
3186
3187 return phb->ioda.io_segsize;
3188}
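/*
 * Illustrative example (assumed numbers): with 256 PEs, a 64GB M64 window
 * and a 2GB M32 window, the function above returns 256MB alignment for
 * 64-bit prefetchable bridge windows, 8MB for other memory windows and the
 * I/O segment size for I/O windows, so each bus behind a PE-sensitive
 * bridge can land in its own segment and therefore its own PE.
 */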
3189
Gavin Shan40e2a472016-05-20 16:41:33 +10003190/*
 3191 * We update the root port, or the upstream port of the
 3192 * bridge behind the root port, with the PHB's windows in order
 3193 * to accommodate changes in the resources required during
 3194 * PCI (slot) hotplug. The slot is connected to either the root
 3195 * port or the downstream ports of a PCIe switch behind the
 3196 * root port.
3197 */
3198static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
3199 unsigned long type)
3200{
3201 struct pci_controller *hose = pci_bus_to_host(bus);
3202 struct pnv_phb *phb = hose->private_data;
3203 struct pci_dev *bridge = bus->self;
3204 struct resource *r, *w;
3205 bool msi_region = false;
3206 int i;
3207
 3208	/* Check if we need to apply a fixup to the bridge's windows */
3209 if (!pci_is_root_bus(bridge->bus) &&
3210 !pci_is_root_bus(bridge->bus->self->bus))
3211 return;
3212
3213 /* Fixup the resources */
3214 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
3215 r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
3216 if (!r->flags || !r->parent)
3217 continue;
3218
3219 w = NULL;
3220 if (r->flags & type & IORESOURCE_IO)
3221 w = &hose->io_resource;
Benjamin Herrenschmidt5958d192016-07-08 15:55:43 +10003222 else if (pnv_pci_is_m64(phb, r) &&
Gavin Shan40e2a472016-05-20 16:41:33 +10003223 (type & IORESOURCE_PREFETCH) &&
3224 phb->ioda.m64_segsize)
3225 w = &hose->mem_resources[1];
3226 else if (r->flags & type & IORESOURCE_MEM) {
3227 w = &hose->mem_resources[0];
3228 msi_region = true;
3229 }
3230
3231 r->start = w->start;
3232 r->end = w->end;
3233
 3234		/* The 64KB 32-bit MSI region shouldn't be included in
 3235		 * the 32-bit bridge window. Otherwise, we can see strange
 3236		 * issues such as the EEH error observed on Garrison.
 3237		 *
 3238		 * Exclude the top 1MB region, which is the minimal alignment
 3239		 * of the 32-bit bridge window.
3240 */
3241 if (msi_region) {
3242 r->end += 0x10000;
3243 r->end -= 0x100000;
3244 }
3245 }
3246}
3247
Gavin Shanccd1c192016-05-20 16:41:31 +10003248static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
3249{
3250 struct pci_controller *hose = pci_bus_to_host(bus);
3251 struct pnv_phb *phb = hose->private_data;
3252 struct pci_dev *bridge = bus->self;
3253 struct pnv_ioda_pe *pe;
3254 bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
3255
Gavin Shan40e2a472016-05-20 16:41:33 +10003256 /* Extend bridge's windows if necessary */
3257 pnv_pci_fixup_bridge_resources(bus, type);
3258
Gavin Shan63803c32016-05-20 16:41:32 +10003259	/* The PE for the root bus should be realized before any others */
3260 if (!phb->ioda.root_pe_populated) {
3261 pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
3262 if (pe) {
3263 phb->ioda.root_pe_idx = pe->pe_number;
3264 phb->ioda.root_pe_populated = true;
3265 }
3266 }
3267
Gavin Shanccd1c192016-05-20 16:41:31 +10003268	/* Don't assign a PE to a PCI bus that doesn't have subordinate devices */
3269 if (list_empty(&bus->devices))
3270 return;
3271
3272 /* Reserve PEs according to used M64 resources */
3273 if (phb->reserve_m64_pe)
3274 phb->reserve_m64_pe(bus, NULL, all);
3275
3276 /*
3277 * Assign PE. We might run here because of partial hotplug.
 3278	 * In that case, we just pick up the existing PE and should
3279 * not allocate resources again.
3280 */
3281 pe = pnv_ioda_setup_bus_PE(bus, all);
3282 if (!pe)
3283 return;
3284
3285 pnv_ioda_setup_pe_seg(pe);
3286 switch (phb->type) {
3287 case PNV_PHB_IODA1:
3288 pnv_pci_ioda1_setup_dma_pe(phb, pe);
3289 break;
3290 case PNV_PHB_IODA2:
3291 pnv_pci_ioda2_setup_dma_pe(phb, pe);
3292 break;
3293 default:
Russell Currey1f52f172016-11-16 14:02:15 +11003294 pr_warn("%s: No DMA for PHB#%x (type %d)\n",
Gavin Shanccd1c192016-05-20 16:41:31 +10003295 __func__, phb->hose->global_number, phb->type);
3296 }
3297}
3298
Wei Yang5350ab32015-03-25 16:23:56 +08003299#ifdef CONFIG_PCI_IOV
3300static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
3301 int resno)
3302{
Wei Yangee8222f2015-10-22 09:22:16 +08003303 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
3304 struct pnv_phb *phb = hose->private_data;
Wei Yang5350ab32015-03-25 16:23:56 +08003305 struct pci_dn *pdn = pci_get_pdn(pdev);
Wei Yang7fbe7a92015-10-22 09:22:15 +08003306 resource_size_t align;
Wei Yang5350ab32015-03-25 16:23:56 +08003307
Wei Yang7fbe7a92015-10-22 09:22:15 +08003308 /*
 3309	 * On the PowerNV platform, an IOV BAR is mapped by an M64 BAR to
 3310	 * enable SR-IOV. From the hardware perspective, the range mapped by
 3311	 * an M64 BAR must be size aligned.
 3312	 *
Wei Yangee8222f2015-10-22 09:22:16 +08003313	 * When the IOV BAR is mapped with an M64 BAR in Single PE mode, the
 3314	 * extra powernv-specific hardware restriction is gone. But if we just
 3315	 * used the VF BAR size as the alignment, the PF BAR / VF BAR might be
 3316	 * allocated within one segment of M64 #15, which would introduce a PE
 3317	 * conflict between the PF and a VF. Based on this, the minimum
 3318	 * alignment of an IOV BAR is m64_segsize.
3319 *
Wei Yang7fbe7a92015-10-22 09:22:15 +08003320	 * This function returns the total IOV BAR size if the M64 BAR is in
 3321	 * Shared PE mode, or just the VF BAR size if not.
Wei Yangee8222f2015-10-22 09:22:16 +08003322	 * If the M64 BAR is in Single PE mode, return the VF BAR size, or the
 3323	 * M64 segment size if the VF BAR size is smaller.
Wei Yang7fbe7a92015-10-22 09:22:15 +08003324 */
Wei Yang5350ab32015-03-25 16:23:56 +08003325 align = pci_iov_resource_size(pdev, resno);
Wei Yang7fbe7a92015-10-22 09:22:15 +08003326 if (!pdn->vfs_expanded)
3327 return align;
Wei Yangee8222f2015-10-22 09:22:16 +08003328 if (pdn->m64_single_mode)
3329 return max(align, (resource_size_t)phb->ioda.m64_segsize);
Wei Yang5350ab32015-03-25 16:23:56 +08003330
Wei Yang7fbe7a92015-10-22 09:22:15 +08003331 return pdn->vfs_expanded * align;
Wei Yang5350ab32015-03-25 16:23:56 +08003332}
3333#endif /* CONFIG_PCI_IOV */
3334
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003335/* Prevent enabling devices for which we couldn't properly
3336 * assign a PE
3337 */
Ian Munsie4361b032016-07-14 07:17:06 +10003338bool pnv_pci_enable_device_hook(struct pci_dev *dev)
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003339{
Gavin Shandb1266c2012-08-20 03:49:18 +00003340 struct pci_controller *hose = pci_bus_to_host(dev->bus);
3341 struct pnv_phb *phb = hose->private_data;
3342 struct pci_dn *pdn;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003343
Gavin Shandb1266c2012-08-20 03:49:18 +00003344	/* The function may be called while the PEs have
 3345	 * not been created yet, for example during resource
 3346	 * reassignment in the PCI probe period. We just skip the
 3347	 * check if the PEs aren't ready.
3348 */
3349 if (!phb->initialized)
Daniel Axtensc88c2a12015-03-31 16:00:41 +11003350 return true;
Gavin Shandb1266c2012-08-20 03:49:18 +00003351
Benjamin Herrenschmidtb72c1f62013-05-21 22:58:21 +00003352 pdn = pci_get_pdn(dev);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003353 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
Daniel Axtensc88c2a12015-03-31 16:00:41 +11003354 return false;
Gavin Shandb1266c2012-08-20 03:49:18 +00003355
Daniel Axtensc88c2a12015-03-31 16:00:41 +11003356 return true;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003357}
3358
Gavin Shanc5f77002016-05-20 16:41:35 +10003359static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
3360 int num)
3361{
3362 struct pnv_ioda_pe *pe = container_of(table_group,
3363 struct pnv_ioda_pe, table_group);
3364 struct pnv_phb *phb = pe->phb;
3365 unsigned int idx;
3366 long rc;
3367
3368 pe_info(pe, "Removing DMA window #%d\n", num);
3369 for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
3370 if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
3371 continue;
3372
3373 rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
3374 idx, 0, 0ul, 0ul, 0ul);
3375 if (rc != OPAL_SUCCESS) {
3376 pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
3377 rc, idx);
3378 return rc;
3379 }
3380
3381 phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
3382 }
3383
3384 pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
3385 return OPAL_SUCCESS;
3386}
3387
3388static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
3389{
3390 unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
3391 struct iommu_table *tbl = pe->table_group.tables[0];
3392 int64_t rc;
3393
3394 if (!weight)
3395 return;
3396
3397 rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
3398 if (rc != OPAL_SUCCESS)
3399 return;
3400
Benjamin Herrenschmidta34ab7c2016-07-08 16:37:12 +10003401 pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
Gavin Shanc5f77002016-05-20 16:41:35 +10003402 if (pe->table_group.group) {
3403 iommu_group_put(pe->table_group.group);
3404 WARN_ON(pe->table_group.group);
3405 }
3406
3407 free_pages(tbl->it_base, get_order(tbl->it_size << 3));
3408 iommu_free_table(tbl, "pnv");
3409}
3410
3411static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
3412{
3413 struct iommu_table *tbl = pe->table_group.tables[0];
3414 unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
3415#ifdef CONFIG_IOMMU_API
3416 int64_t rc;
3417#endif
3418
3419 if (!weight)
3420 return;
3421
3422#ifdef CONFIG_IOMMU_API
3423 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
3424 if (rc)
 3425		pe_warn(pe, "OPAL error %ld releasing DMA window\n", rc);
3426#endif
3427
3428 pnv_pci_ioda2_set_bypass(pe, false);
3429 if (pe->table_group.group) {
3430 iommu_group_put(pe->table_group.group);
3431 WARN_ON(pe->table_group.group);
3432 }
3433
3434 pnv_pci_ioda2_table_free_pages(tbl);
3435 iommu_free_table(tbl, "pnv");
3436}
3437
3438static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
3439 unsigned short win,
3440 unsigned int *map)
3441{
3442 struct pnv_phb *phb = pe->phb;
3443 int idx;
3444 int64_t rc;
3445
3446 for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
3447 if (map[idx] != pe->pe_number)
3448 continue;
3449
3450 if (win == OPAL_M64_WINDOW_TYPE)
3451 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3452 phb->ioda.reserved_pe_idx, win,
3453 idx / PNV_IODA1_M64_SEGS,
3454 idx % PNV_IODA1_M64_SEGS);
3455 else
3456 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
3457 phb->ioda.reserved_pe_idx, win, 0, idx);
3458
3459 if (rc != OPAL_SUCCESS)
3460 pe_warn(pe, "Error %ld unmapping (%d) segment#%d\n",
3461 rc, win, idx);
3462
3463 map[idx] = IODA_INVALID_PE;
3464 }
3465}
3466
3467static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
3468{
3469 struct pnv_phb *phb = pe->phb;
3470
3471 if (phb->type == PNV_PHB_IODA1) {
3472 pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
3473 phb->ioda.io_segmap);
3474 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
3475 phb->ioda.m32_segmap);
3476 pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE,
3477 phb->ioda.m64_segmap);
3478 } else if (phb->type == PNV_PHB_IODA2) {
3479 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
3480 phb->ioda.m32_segmap);
3481 }
3482}
3483
3484static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
3485{
3486 struct pnv_phb *phb = pe->phb;
3487 struct pnv_ioda_pe *slave, *tmp;
3488
Gavin Shanc5f77002016-05-20 16:41:35 +10003489 list_del(&pe->list);
3490 switch (phb->type) {
3491 case PNV_PHB_IODA1:
3492 pnv_pci_ioda1_release_pe_dma(pe);
3493 break;
3494 case PNV_PHB_IODA2:
3495 pnv_pci_ioda2_release_pe_dma(pe);
3496 break;
3497 default:
3498 WARN_ON(1);
3499 }
3500
3501 pnv_ioda_release_pe_seg(pe);
3502 pnv_ioda_deconfigure_pe(pe->phb, pe);
Gavin Shanb3144272016-09-06 14:16:44 +10003503
3504 /* Release slave PEs in the compound PE */
3505 if (pe->flags & PNV_IODA_PE_MASTER) {
3506 list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
3507 list_del(&slave->list);
3508 pnv_ioda_free_pe(slave);
3509 }
3510 }
3511
Gavin Shan6eaed162016-09-13 16:40:24 +10003512 /*
3513 * The PE for root bus can be removed because of hotplug in EEH
3514 * recovery for fenced PHB error. We need to mark the PE dead so
3515 * that it can be populated again in PCI hot add path. The PE
3516 * shouldn't be destroyed as it's the global reserved resource.
3517 */
3518 if (phb->ioda.root_pe_populated &&
3519 phb->ioda.root_pe_idx == pe->pe_number)
3520 phb->ioda.root_pe_populated = false;
3521 else
3522 pnv_ioda_free_pe(pe);
Gavin Shanc5f77002016-05-20 16:41:35 +10003523}
3524
3525static void pnv_pci_release_device(struct pci_dev *pdev)
3526{
3527 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
3528 struct pnv_phb *phb = hose->private_data;
3529 struct pci_dn *pdn = pci_get_pdn(pdev);
3530 struct pnv_ioda_pe *pe;
3531
3532 if (pdev->is_virtfn)
3533 return;
3534
3535 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
3536 return;
3537
Gavin Shan29bf2822016-09-06 16:34:01 +10003538 /*
3539 * PCI hotplug can happen as part of EEH error recovery. The @pdn
3540 * isn't removed and added afterwards in this scenario. We should
3541 * set the PE number in @pdn to an invalid one. Otherwise, the PE's
3542 * device count is decreased on removing devices while failing to
 3543	 * be increased on adding devices. That leads to an unbalanced PE
 3544	 * device count and eventually breaks the normal PCI hotplug path.
3545 */
Gavin Shanc5f77002016-05-20 16:41:35 +10003546 pe = &phb->ioda.pe_array[pdn->pe_number];
Gavin Shan29bf2822016-09-06 16:34:01 +10003547 pdn->pe_number = IODA_INVALID_PE;
3548
Gavin Shanc5f77002016-05-20 16:41:35 +10003549 WARN_ON(--pe->device_count < 0);
3550 if (pe->device_count == 0)
3551 pnv_ioda_release_pe(pe);
3552}
3553
Michael Neuling7a8e6bb2015-05-27 16:06:59 +10003554static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
Benjamin Herrenschmidt73ed1482013-05-10 16:59:18 +10003555{
Michael Neuling7a8e6bb2015-05-27 16:06:59 +10003556 struct pnv_phb *phb = hose->private_data;
3557
Gavin Shand1a85ee2014-09-30 12:39:05 +10003558 opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
Benjamin Herrenschmidt73ed1482013-05-10 16:59:18 +10003559 OPAL_ASSERT_RESET);
3560}
3561
Daniel Axtens92ae0352015-04-28 15:12:05 +10003562static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
Gavin Shancb4224c2016-05-03 15:41:21 +10003563 .dma_dev_setup = pnv_pci_dma_dev_setup,
3564 .dma_bus_setup = pnv_pci_dma_bus_setup,
Daniel Axtens92ae0352015-04-28 15:12:05 +10003565#ifdef CONFIG_PCI_MSI
Gavin Shancb4224c2016-05-03 15:41:21 +10003566 .setup_msi_irqs = pnv_setup_msi_irqs,
3567 .teardown_msi_irqs = pnv_teardown_msi_irqs,
Daniel Axtens92ae0352015-04-28 15:12:05 +10003568#endif
Gavin Shancb4224c2016-05-03 15:41:21 +10003569 .enable_device_hook = pnv_pci_enable_device_hook,
Gavin Shanc5f77002016-05-20 16:41:35 +10003570 .release_device = pnv_pci_release_device,
Gavin Shancb4224c2016-05-03 15:41:21 +10003571 .window_alignment = pnv_pci_window_alignment,
Gavin Shanccd1c192016-05-20 16:41:31 +10003572 .setup_bridge = pnv_pci_setup_bridge,
Gavin Shancb4224c2016-05-03 15:41:21 +10003573 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
3574 .dma_set_mask = pnv_pci_ioda_dma_set_mask,
3575 .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
3576 .shutdown = pnv_pci_ioda_shutdown,
Daniel Axtens92ae0352015-04-28 15:12:05 +10003577};
3578
Alexey Kardashevskiyf9f83452016-04-29 18:55:20 +10003579static int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
3580{
3581 dev_err_once(&npdev->dev,
3582 "%s operation unsupported for NVLink devices\n",
3583 __func__);
3584 return -EPERM;
3585}
3586
Alistair Popple5d2aa712015-12-17 13:43:13 +11003587static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
Gavin Shancb4224c2016-05-03 15:41:21 +10003588 .dma_dev_setup = pnv_pci_dma_dev_setup,
Alistair Popple5d2aa712015-12-17 13:43:13 +11003589#ifdef CONFIG_PCI_MSI
Gavin Shancb4224c2016-05-03 15:41:21 +10003590 .setup_msi_irqs = pnv_setup_msi_irqs,
3591 .teardown_msi_irqs = pnv_teardown_msi_irqs,
Alistair Popple5d2aa712015-12-17 13:43:13 +11003592#endif
Gavin Shancb4224c2016-05-03 15:41:21 +10003593 .enable_device_hook = pnv_pci_enable_device_hook,
3594 .window_alignment = pnv_pci_window_alignment,
3595 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
3596 .dma_set_mask = pnv_npu_dma_set_mask,
3597 .shutdown = pnv_pci_ioda_shutdown,
Alistair Popple5d2aa712015-12-17 13:43:13 +11003598};
3599
Ian Munsie4361b032016-07-14 07:17:06 +10003600#ifdef CONFIG_CXL_BASE
3601const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops = {
3602 .dma_dev_setup = pnv_pci_dma_dev_setup,
3603 .dma_bus_setup = pnv_pci_dma_bus_setup,
Ian Munsiea2f67d52016-07-14 07:17:10 +10003604#ifdef CONFIG_PCI_MSI
3605 .setup_msi_irqs = pnv_cxl_cx4_setup_msi_irqs,
3606 .teardown_msi_irqs = pnv_cxl_cx4_teardown_msi_irqs,
3607#endif
Ian Munsie4361b032016-07-14 07:17:06 +10003608 .enable_device_hook = pnv_cxl_enable_device_hook,
3609 .disable_device = pnv_cxl_disable_device,
3610 .release_device = pnv_pci_release_device,
3611 .window_alignment = pnv_pci_window_alignment,
3612 .setup_bridge = pnv_pci_setup_bridge,
3613 .reset_secondary_bus = pnv_pci_reset_secondary_bus,
3614 .dma_set_mask = pnv_pci_ioda_dma_set_mask,
3615 .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
3616 .shutdown = pnv_pci_ioda_shutdown,
3617};
3618#endif
3619
Anton Blancharde51df2c2014-08-20 08:55:18 +10003620static void __init pnv_pci_init_ioda_phb(struct device_node *np,
3621 u64 hub_id, int ioda_type)
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003622{
3623 struct pci_controller *hose;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003624 struct pnv_phb *phb;
Gavin Shan2b923ed2016-05-05 12:04:16 +10003625 unsigned long size, m64map_off, m32map_off, pemap_off;
3626 unsigned long iomap_off = 0, dma32map_off = 0;
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10003627 struct resource r;
Alistair Popplec681b932013-09-23 12:04:57 +10003628 const __be64 *prop64;
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10003629 const __be32 *prop32;
Gavin Shanf1b7cc32013-07-31 16:47:01 +08003630 int len;
Gavin Shan3fa23ff2016-05-03 15:41:26 +10003631 unsigned int segno;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003632 u64 phb_id;
3633 void *aux;
3634 long rc;
3635
Benjamin Herrenschmidt08a45b32016-07-08 16:37:17 +10003636 if (!of_device_is_available(np))
3637 return;
3638
Gavin Shan9497a1c2016-06-21 12:35:56 +10003639 pr_info("Initializing %s PHB (%s)\n",
3640 pnv_phb_names[ioda_type], of_node_full_name(np));
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003641
3642 prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
3643 if (!prop64) {
3644 pr_err(" Missing \"ibm,opal-phbid\" property !\n");
3645 return;
3646 }
3647 phb_id = be64_to_cpup(prop64);
3648 pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
3649
Michael Ellermane39f223f2014-11-18 16:47:35 +11003650 phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
Gavin Shan58d714e2013-07-31 16:47:00 +08003651
3652 /* Allocate PCI controller */
Gavin Shan58d714e2013-07-31 16:47:00 +08003653 phb->hose = hose = pcibios_alloc_controller(np);
3654 if (!phb->hose) {
3655 pr_err(" Can't allocate PCI controller for %s\n",
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003656 np->full_name);
Michael Ellermane39f223f2014-11-18 16:47:35 +11003657 memblock_free(__pa(phb), sizeof(struct pnv_phb));
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003658 return;
3659 }
3660
3661 spin_lock_init(&phb->lock);
Gavin Shanf1b7cc32013-07-31 16:47:01 +08003662 prop32 = of_get_property(np, "bus-range", &len);
3663 if (prop32 && len == 8) {
Benjamin Herrenschmidt3a1a4662013-09-23 12:05:01 +10003664 hose->first_busno = be32_to_cpu(prop32[0]);
3665 hose->last_busno = be32_to_cpu(prop32[1]);
Gavin Shanf1b7cc32013-07-31 16:47:01 +08003666 } else {
3667 pr_warn(" Broken <bus-range> on %s\n", np->full_name);
3668 hose->first_busno = 0;
3669 hose->last_busno = 0xff;
3670 }
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003671 hose->private_data = phb;
Gavin Shane9cc17d2013-06-20 13:21:14 +08003672 phb->hub_id = hub_id;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003673 phb->opal_id = phb_id;
Gavin Shanaa0c0332013-04-25 19:20:57 +00003674 phb->type = ioda_type;
Wei Yang781a8682015-03-25 16:23:57 +08003675 mutex_init(&phb->ioda.pe_alloc_mutex);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003676
Benjamin Herrenschmidtcee72d52011-11-29 18:22:53 +00003677 /* Detect specific models for error handling */
3678 if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
3679 phb->model = PNV_PHB_MODEL_P7IOC;
Benjamin Herrenschmidtf3d40c22013-05-04 14:24:32 +00003680 else if (of_device_is_compatible(np, "ibm,power8-pciex"))
Gavin Shanaa0c0332013-04-25 19:20:57 +00003681 phb->model = PNV_PHB_MODEL_PHB3;
Alistair Popple5d2aa712015-12-17 13:43:13 +11003682 else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
3683 phb->model = PNV_PHB_MODEL_NPU;
Alistair Popple616badd2017-01-10 15:41:44 +11003684 else if (of_device_is_compatible(np, "ibm,power9-npu-pciex"))
3685 phb->model = PNV_PHB_MODEL_NPU2;
Benjamin Herrenschmidtcee72d52011-11-29 18:22:53 +00003686 else
3687 phb->model = PNV_PHB_MODEL_UNKNOWN;
3688
Gavin Shanaa0c0332013-04-25 19:20:57 +00003689 /* Parse 32-bit and IO ranges (if any) */
Gavin Shan2f1ec022013-07-31 16:47:02 +08003690 pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003691
Gavin Shanaa0c0332013-04-25 19:20:57 +00003692 /* Get registers */
Benjamin Herrenschmidtfd141d1a2016-07-08 16:37:14 +10003693 if (!of_address_to_resource(np, 0, &r)) {
3694 phb->regs_phys = r.start;
3695 phb->regs = ioremap(r.start, resource_size(&r));
3696 if (phb->regs == NULL)
3697 pr_err(" Failed to map registers !\n");
3698 }
Gavin Shan577c8c82016-05-20 16:41:28 +10003699
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003700 /* Initialize more IODA stuff */
Gavin Shan92b8f132016-05-03 15:41:24 +10003701 phb->ioda.total_pe_num = 1;
Gavin Shanaa0c0332013-04-25 19:20:57 +00003702 prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
Gavin Shan36954dc2013-11-04 16:32:47 +08003703 if (prop32)
Gavin Shan92b8f132016-05-03 15:41:24 +10003704 phb->ioda.total_pe_num = be32_to_cpup(prop32);
Gavin Shan36954dc2013-11-04 16:32:47 +08003705 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
3706 if (prop32)
Gavin Shan92b8f132016-05-03 15:41:24 +10003707 phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);
Guo Chao262af552014-07-21 14:42:30 +10003708
Gavin Shanc1275622016-05-20 16:41:29 +10003709 /* Invalidate RID to PE# mapping */
3710 for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
3711 phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;
3712
Guo Chao262af552014-07-21 14:42:30 +10003713 /* Parse 64-bit MMIO range */
3714 pnv_ioda_parse_m64_window(phb);
3715
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003716 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
Gavin Shanaa0c0332013-04-25 19:20:57 +00003717	/* FW has already carved the top 64k off the M32 space (MSI space) */
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003718 phb->ioda.m32_size += 0x10000;
3719
Gavin Shan92b8f132016-05-03 15:41:24 +10003720 phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num;
Benjamin Herrenschmidt3fd47f02013-05-06 13:40:40 +10003721 phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003722 phb->ioda.io_size = hose->pci_io_size;
Gavin Shan92b8f132016-05-03 15:41:24 +10003723 phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003724 phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
3725
Gavin Shan2b923ed2016-05-05 12:04:16 +10003726 /* Calculate how many 32-bit TCE segments we have */
3727 phb->ioda.dma32_count = phb->ioda.m32_pci_base /
3728 PNV_IODA1_DMA32_SEGSIZE;
3729
Gavin Shanc35d2a82013-07-31 16:47:04 +08003730 /* Allocate aux data & arrays. We don't have IO ports on PHB3 */
Alexey Kardashevskiy92a86752016-05-12 15:47:09 +10003731 size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
3732 sizeof(unsigned long));
Gavin Shan93289d82016-05-03 15:41:29 +10003733 m64map_off = size;
3734 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003735 m32map_off = size;
Gavin Shan92b8f132016-05-03 15:41:24 +10003736 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]);
Gavin Shanc35d2a82013-07-31 16:47:04 +08003737 if (phb->type == PNV_PHB_IODA1) {
3738 iomap_off = size;
Gavin Shan92b8f132016-05-03 15:41:24 +10003739 size += phb->ioda.total_pe_num * sizeof(phb->ioda.io_segmap[0]);
Gavin Shan2b923ed2016-05-05 12:04:16 +10003740 dma32map_off = size;
3741 size += phb->ioda.dma32_count *
3742 sizeof(phb->ioda.dma32_segmap[0]);
Gavin Shanc35d2a82013-07-31 16:47:04 +08003743 }
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003744 pemap_off = size;
Gavin Shan92b8f132016-05-03 15:41:24 +10003745 size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
Michael Ellermane39f223f2014-11-18 16:47:35 +11003746 aux = memblock_virt_alloc(size, 0);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003747 phb->ioda.pe_alloc = aux;
Gavin Shan93289d82016-05-03 15:41:29 +10003748 phb->ioda.m64_segmap = aux + m64map_off;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003749 phb->ioda.m32_segmap = aux + m32map_off;
Gavin Shan93289d82016-05-03 15:41:29 +10003750 for (segno = 0; segno < phb->ioda.total_pe_num; segno++) {
3751 phb->ioda.m64_segmap[segno] = IODA_INVALID_PE;
Gavin Shan3fa23ff2016-05-03 15:41:26 +10003752 phb->ioda.m32_segmap[segno] = IODA_INVALID_PE;
Gavin Shan93289d82016-05-03 15:41:29 +10003753 }
Gavin Shan3fa23ff2016-05-03 15:41:26 +10003754 if (phb->type == PNV_PHB_IODA1) {
Gavin Shanc35d2a82013-07-31 16:47:04 +08003755 phb->ioda.io_segmap = aux + iomap_off;
Gavin Shan3fa23ff2016-05-03 15:41:26 +10003756 for (segno = 0; segno < phb->ioda.total_pe_num; segno++)
3757 phb->ioda.io_segmap[segno] = IODA_INVALID_PE;
Gavin Shan2b923ed2016-05-05 12:04:16 +10003758
3759 phb->ioda.dma32_segmap = aux + dma32map_off;
3760 for (segno = 0; segno < phb->ioda.dma32_count; segno++)
3761 phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
Gavin Shan3fa23ff2016-05-03 15:41:26 +10003762 }
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003763 phb->ioda.pe_array = aux + pemap_off;
Gavin Shan63803c32016-05-20 16:41:32 +10003764
3765 /*
 3766	 * Choose a PE number for the root bus, which shouldn't have
 3767	 * M64 resources consumed by its child devices. Pick
 3768	 * the PE number adjacent to the reserved one if possible.
3769 */
3770 pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
3771 if (phb->ioda.reserved_pe_idx == 0) {
3772 phb->ioda.root_pe_idx = 1;
3773 pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
3774 } else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
3775 phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
3776 pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
3777 } else {
3778 phb->ioda.root_pe_idx = IODA_INVALID_PE;
3779 }
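	/*
	 * Illustrative note (an assumption about typical firmware values):
	 * on PHB3 the reserved PE is usually the last one (e.g. 255 of 256),
	 * so the root bus normally gets reserved_pe_idx - 1; only when
	 * firmware reserves PE#0 does the root bus fall back to PE#1.
	 */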
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003780
3781 INIT_LIST_HEAD(&phb->ioda.pe_list);
Wei Yang781a8682015-03-25 16:23:57 +08003782 mutex_init(&phb->ioda.pe_list_mutex);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003783
3784 /* Calculate how many 32-bit TCE segments we have */
Gavin Shan2b923ed2016-05-05 12:04:16 +10003785 phb->ioda.dma32_count = phb->ioda.m32_pci_base /
Gavin Shanacce9712016-05-03 15:41:33 +10003786 PNV_IODA1_DMA32_SEGSIZE;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003787
Gavin Shanaa0c0332013-04-25 19:20:57 +00003788#if 0 /* We should really do that ... */
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003789 rc = opal_pci_set_phb_mem_window(opal->phb_id,
3790 window_type,
3791 window_num,
3792 starting_real_address,
3793 starting_pci_address,
3794 segment_size);
3795#endif
3796
Guo Chao262af552014-07-21 14:42:30 +10003797 pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
Gavin Shan92b8f132016-05-03 15:41:24 +10003798 phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx,
Guo Chao262af552014-07-21 14:42:30 +10003799 phb->ioda.m32_size, phb->ioda.m32_segsize);
3800 if (phb->ioda.m64_size)
3801 pr_info(" M64: 0x%lx [segment=0x%lx]\n",
3802 phb->ioda.m64_size, phb->ioda.m64_segsize);
3803 if (phb->ioda.io_size)
3804 pr_info(" IO: 0x%x [segment=0x%x]\n",
3805 phb->ioda.io_size, phb->ioda.io_segsize);
3806
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003807
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003808 phb->hose->ops = &pnv_pci_ops;
Gavin Shan49dec922014-07-21 14:42:33 +10003809 phb->get_pe_state = pnv_ioda_get_pe_state;
3810 phb->freeze_pe = pnv_ioda_freeze_pe;
3811 phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003812
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003813 /* Setup MSI support */
3814 pnv_pci_init_ioda_msis(phb);
3815
Gavin Shanc40a4212012-08-20 03:49:20 +00003816 /*
3817 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
 3818	 * to let the PCI core do resource assignment. The PCI core is
 3819	 * expected to apply correct I/O and MMIO alignment
 3820	 * to the P2P bridge BARs so that each PCI bus (excluding
 3821	 * the child P2P bridges) can form an individual PE.
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003822 */
Gavin Shanfb446ad2012-08-20 03:49:14 +00003823 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
Alistair Popple5d2aa712015-12-17 13:43:13 +11003824
Alexey Kardashevskiyf9f83452016-04-29 18:55:20 +10003825 if (phb->type == PNV_PHB_NPU) {
Alistair Popple5d2aa712015-12-17 13:43:13 +11003826 hose->controller_ops = pnv_npu_ioda_controller_ops;
Alexey Kardashevskiyf9f83452016-04-29 18:55:20 +10003827 } else {
3828 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
Alistair Popple5d2aa712015-12-17 13:43:13 +11003829 hose->controller_ops = pnv_pci_ioda_controller_ops;
Alexey Kardashevskiyf9f83452016-04-29 18:55:20 +10003830 }
Michael Ellermanad30cb92015-04-14 09:29:23 +10003831
Wei Yang6e628c72015-03-25 16:23:55 +08003832#ifdef CONFIG_PCI_IOV
3833 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
Wei Yang5350ab32015-03-25 16:23:56 +08003834 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
Michael Ellermanad30cb92015-04-14 09:29:23 +10003835#endif
3836
Gavin Shanc40a4212012-08-20 03:49:20 +00003837 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003838
3839 /* Reset IODA tables to a clean state */
Gavin Shand1a85ee2014-09-30 12:39:05 +10003840 rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003841 if (rc)
Benjamin Herrenschmidtf11fe552011-11-29 18:22:50 +00003842 pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
Gavin Shan361f2a22014-04-24 18:00:25 +10003843
Andrew Donnellan6060e9e2016-09-16 20:39:44 +10003844 /*
 3845	 * If we're running in a kdump kernel, the previous kernel never
Gavin Shan361f2a22014-04-24 18:00:25 +10003846	 * shut down PCI devices correctly. The IODA table has already been
 3847	 * cleaned out, so we have to issue a PHB reset to stop all PCI
Andrew Donnellan6060e9e2016-09-16 20:39:44 +10003848	 * transactions from the previous kernel.
Gavin Shan361f2a22014-04-24 18:00:25 +10003849 */
3850 if (is_kdump_kernel()) {
3851 pr_info(" Issue PHB reset ...\n");
Gavin Shancadf3642015-02-16 14:45:47 +11003852 pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
3853 pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
Gavin Shan361f2a22014-04-24 18:00:25 +10003854 }
Guo Chao262af552014-07-21 14:42:30 +10003855
Gavin Shan9e9e8932014-11-12 13:36:05 +11003856 /* Remove M64 resource if we can't configure it successfully */
3857 if (!phb->init_m64 || phb->init_m64(phb))
Guo Chao262af552014-07-21 14:42:30 +10003858 hose->mem_resources[1].flags = 0;
Gavin Shanaa0c0332013-04-25 19:20:57 +00003859}
3860
Bjorn Helgaas67975002013-07-02 12:20:03 -06003861void __init pnv_pci_init_ioda2_phb(struct device_node *np)
Gavin Shanaa0c0332013-04-25 19:20:57 +00003862{
Gavin Shane9cc17d2013-06-20 13:21:14 +08003863 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003864}
3865
Alistair Popple5d2aa712015-12-17 13:43:13 +11003866void __init pnv_pci_init_npu_phb(struct device_node *np)
3867{
3868 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU);
3869}
3870
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003871void __init pnv_pci_init_ioda_hub(struct device_node *np)
3872{
3873 struct device_node *phbn;
Alistair Popplec681b932013-09-23 12:04:57 +10003874 const __be64 *prop64;
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003875 u64 hub_id;
3876
3877 pr_info("Probing IODA IO-Hub %s\n", np->full_name);
3878
3879 prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
3880 if (!prop64) {
3881 pr_err(" Missing \"ibm,opal-hubid\" property !\n");
3882 return;
3883 }
3884 hub_id = be64_to_cpup(prop64);
3885 pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
3886
3887 /* Count child PHBs */
3888 for_each_child_of_node(np, phbn) {
3889 /* Look for IODA1 PHBs */
3890 if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
Gavin Shane9cc17d2013-06-20 13:21:14 +08003891 pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
Benjamin Herrenschmidt184cd4a2011-11-15 17:29:08 +00003892 }
3893}