/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/msi.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#define DEBUG                           /* enable pr_debug */

#define SIC_IRQ_MODE_ALL                0
#define SIC_IRQ_MODE_SINGLE             1

#define ZPCI_NR_DMA_SPACES              1
#define ZPCI_MSI_VEC_BITS               6
#define ZPCI_NR_DEVICES                 CONFIG_PCI_NR_FUNCTIONS

/* list of all detected zpci devices */
LIST_HEAD(zpci_list);
EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);

static struct pci_hp_callback_ops *hotplug_ops;

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

struct callback {
        irq_handler_t   handler;
        void            *data;
};

struct zdev_irq_map {
        unsigned long   aibv;           /* AI bit vector */
        int             msi_vecs;       /* consecutive MSI-vectors used */
        int             __unused;
        struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
        spinlock_t      lock;           /* protect callbacks against de-reg */
};

struct intr_bucket {
        /* amap of adapters, one bit per dev, corresponds to one irq nr */
        unsigned long   *alloc;
        /* AI summary bit, global page for all devices */
        unsigned long   *aisb;
        /* pointer to aibv and callback data in zdev */
        struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
        /* protects the whole bucket struct */
        spinlock_t      lock;
};

static struct intr_bucket *bucket;

/* Adapter local summary indicator */
static u8 *zpci_irq_si;

static atomic_t irq_retries = ATOMIC_INIT(0);

/* I/O Map */
static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* highest irq summary bit */
static int __read_mostly aisb_max;

static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;

debug_info_t *pci_debug_msg_id;
debug_info_t *pci_debug_err_id;

static inline int irq_to_msi_nr(unsigned int irq)
{
        return irq & ZPCI_MSI_MASK;
}

static inline int irq_to_dev_nr(unsigned int irq)
{
        return irq >> ZPCI_MSI_VEC_BITS;
}

static inline struct zdev_irq_map *get_imap(unsigned int irq)
{
        return bucket->imap[irq_to_dev_nr(irq)];
}

struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
        return (struct zpci_dev *) pdev->sysdata;
}

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
        struct zpci_dev *tmp, *zdev = NULL;

        mutex_lock(&zpci_list_lock);
        list_for_each_entry(tmp, &zpci_list, entry) {
                if (tmp->fid == fid) {
                        zdev = tmp;
                        break;
                }
        }
        mutex_unlock(&zpci_list_lock);
        return zdev;
}

bool zpci_fid_present(u32 fid)
{
        return (get_zdev_by_fid(fid) != NULL) ? true : false;
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
        return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
        return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
        return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
                              u64 aibv)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
        struct zpci_fib *fib;
        int rc;

        fib = (void *) get_zeroed_page(GFP_KERNEL);
        if (!fib)
                return -ENOMEM;

        fib->isc = PCI_ISC;
        fib->noi = zdev->irq_map->msi_vecs;
        fib->sum = 1;           /* enable summary notifications */
        fib->aibv = aibv;
        fib->aibvo = 0;         /* every function has its own page */
        fib->aisb = (u64) bucket->aisb + aisb / 8;
        fib->aisbo = aisb & ZPCI_MSI_MASK;

        rc = mpcifc_instr(req, fib);
        pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

        free_page((unsigned long) fib);
        return rc;
}

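/*
 * All "Modify PCI" operations below are funneled through mod_pci(): a
 * function information block (FIB) is filled with the fields relevant
 * for the requested operation and passed to the MPCIFC instruction for
 * the function handle of the device.
 */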
struct mod_pci_args {
        u64 base;
        u64 limit;
        u64 iota;
        u64 fmb_addr;
};

static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
        struct zpci_fib *fib;
        int rc;

        /* The FIB must be available even if it's not used */
        fib = (void *) get_zeroed_page(GFP_KERNEL);
        if (!fib)
                return -ENOMEM;

        fib->pba = args->base;
        fib->pal = args->limit;
        fib->iota = args->iota;
        fib->fmb_addr = args->fmb_addr;

        rc = mpcifc_instr(req, fib);
        free_page((unsigned long) fib);
        return rc;
}

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
                       u64 base, u64 limit, u64 iota)
{
        struct mod_pci_args args = { base, limit, iota, 0 };

        WARN_ON_ONCE(iota & 0x3fff);
        args.iota |= ZPCI_IOTA_RTTO_FLAG;
        return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
        struct mod_pci_args args = { 0, 0, 0, 0 };

        return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister adapter interruptions */
static int zpci_unregister_airq(struct zpci_dev *zdev)
{
        struct mod_pci_args args = { 0, 0, 0, 0 };

        return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}

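/*
 * Function measurement: a 16-byte aligned measurement block (fmb) is
 * handed to the function via the set-measurement operation of mod_pci().
 * Passing a zero fmb address disables measurement again, after which the
 * block is freed.
 */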
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
        struct mod_pci_args args = { 0, 0, 0, 0 };

        if (zdev->fmb)
                return -EINVAL;

        zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
        if (!zdev->fmb)
                return -ENOMEM;
        WARN_ON((u64) zdev->fmb & 0xf);

        args.fmb_addr = virt_to_phys(zdev->fmb);
        return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
        struct mod_pci_args args = { 0, 0, 0, 0 };
        int rc;

        if (!zdev->fmb)
                return -EINVAL;

        /* Function measurement is disabled if fmb address is zero */
        rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);

        kmem_cache_free(zdev_fmb_cache, zdev->fmb);
        zdev->fmb = NULL;
        return rc;
}

#define ZPCI_PCIAS_CFGSPC       15

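/*
 * Config space accessors. PCI config space is little-endian while the
 * CPU is big-endian; the shift plus le64_to_cpu()/cpu_to_le64() below
 * convert between the CPU representation of the value and the
 * right-aligned byte string exchanged with the pcilg/pcistg
 * instructions.
 */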
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
        u64 data;
        int rc;

        rc = pcilg_instr(&data, req, offset);
        data = data << ((8 - len) * 8);
        data = le64_to_cpu(data);
        if (!rc)
                *val = (u32) data;
        else
                *val = 0xffffffff;
        return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
        u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
        u64 data = val;
        int rc;

        data = cpu_to_le64(data);
        data = data >> ((8 - len) * 8);
        rc = pcistg_instr(data, req, offset);
        return rc;
}

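/*
 * The zPCI code does not (yet) use the generic IRQ infrastructure, so
 * the driver-visible IRQ interfaces (synchronize_irq, enable_irq and
 * friends here, request_irq/free_irq further down) are provided
 * directly. Masking and unmasking is done via the MSI mask bits of the
 * function.
 */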
void synchronize_irq(unsigned int irq)
{
        /*
         * Not needed, the handler is protected by a lock and IRQs that occur
         * after the handler is deleted are just NOPs.
         */
}
EXPORT_SYMBOL_GPL(synchronize_irq);

void enable_irq(unsigned int irq)
{
        struct msi_desc *msi = irq_get_msi_desc(irq);

        zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
        struct msi_desc *msi = irq_get_msi_desc(irq);

        zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);

void disable_irq_nosync(unsigned int irq)
{
        disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);

unsigned long probe_irq_on(void)
{
        return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);

int probe_irq_off(unsigned long val)
{
        return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long val)
{
        return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);

void pcibios_fixup_bus(struct pci_bus *bus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                       resource_size_t size,
                                       resource_size_t align)
{
        return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
        zpci_memcpy_toio(to, from, count);
}

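/*
 * BAR "mappings" are cookies only: pci_iomap() reserves a slot in
 * zpci_iomap_start[] recording function handle and BAR number, and
 * returns an address of the form ZPCI_IOMAP_ADDR_BASE | (idx << 48).
 * pci_iounmap() extracts the index from the upper bits of the cookie
 * and clears the slot again.
 */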
/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
        struct zpci_dev *zdev = get_zdev(pdev);
        u64 addr;
        int idx;

        if ((bar & 7) != bar)
                return NULL;

        idx = zdev->bars[bar].map_idx;
        spin_lock(&zpci_iomap_lock);
        zpci_iomap_start[idx].fh = zdev->fh;
        zpci_iomap_start[idx].bar = bar;
        spin_unlock(&zpci_iomap_lock);

        addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
        return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
        unsigned int idx;

        idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
        spin_lock(&zpci_iomap_lock);
        zpci_iomap_start[idx].fh = 0;
        zpci_iomap_start[idx].bar = 0;
        spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                    int size, u32 *val)
{
        struct zpci_dev *zdev = get_zdev_by_bus(bus);

        if (!zdev || devfn != ZPCI_DEVFN)
                return 0;
        return zpci_cfg_load(zdev, where, val, size);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 val)
{
        struct zpci_dev *zdev = get_zdev_by_bus(bus);

        if (!zdev || devfn != ZPCI_DEVFN)
                return 0;
        return zpci_cfg_store(zdev, where, val, size);
}

static struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};

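/*
 * Adapter interrupt dispatch: the handler scans the global summary bit
 * vector starting at the per-cpu next_sbit position (for fairness),
 * clears each summary bit found and walks the per-function adapter
 * interrupt bit vector (aibv) to call the registered MSI handlers.
 * Bits skipped before the start position are handled in a second pass,
 * and after re-enabling the ISC the summary vector is checked once more
 * so that no interrupt initiative is lost.
 */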
/* store the last handled bit to implement fair scheduling of devices */
static DEFINE_PER_CPU(unsigned long, next_sbit);

static void zpci_irq_handler(void *dont, void *need)
{
        unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
        int rescan = 0, max = aisb_max;
        struct zdev_irq_map *imap;

        inc_irq_stat(IRQIO_PCI);
        sbit = start;

scan:
        /* find summary_bit */
        for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
                clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
                last = sbit;

                /* find vector bit */
                imap = bucket->imap[sbit];
                for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
                        inc_irq_stat(IRQIO_MSI);
                        clear_bit(63 - mbit, &imap->aibv);

                        spin_lock(&imap->lock);
                        if (imap->cb[mbit].handler)
                                imap->cb[mbit].handler(mbit,
                                        imap->cb[mbit].data);
                        spin_unlock(&imap->lock);
                }
        }

        if (rescan)
                goto out;

        /* scan the skipped bits */
        if (start > 0) {
                sbit = 0;
                max = start;
                start = 0;
                goto scan;
        }

        /* enable interrupts again */
        sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);

        /* check again to not lose initiative */
        rmb();
        max = aisb_max;
        sbit = find_first_bit_left(bucket->aisb, max);
        if (sbit != max) {
                atomic_inc(&irq_retries);
                rescan++;
                goto scan;
        }
out:
        /* store next device bit to scan */
        __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
}

/* msi_vecs - number of requested interrupts, 0 places the function into error state */
static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
{
        struct zpci_dev *zdev = get_zdev(pdev);
        unsigned int aisb, msi_nr;
        struct msi_desc *msi;
        int rc;

        /* store the number of used MSI vectors */
        zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);

        spin_lock(&bucket->lock);
        aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
        /* alloc map exhausted? */
        if (aisb == PAGE_SIZE) {
                spin_unlock(&bucket->lock);
                return -EIO;
        }
        set_bit(aisb, bucket->alloc);
        spin_unlock(&bucket->lock);

        zdev->aisb = aisb;
        if (aisb + 1 > aisb_max)
                aisb_max = aisb + 1;

        /* wire up IRQ shortcut pointer */
        bucket->imap[zdev->aisb] = zdev->irq_map;
        pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);

        /* TODO: irq number 0 won't be found if we return fewer MSIs than requested.
         * Ignore it for now and fix in common code.
         */
        msi_nr = aisb << ZPCI_MSI_VEC_BITS;

        list_for_each_entry(msi, &pdev->msi_list, list) {
                rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
                                        aisb << ZPCI_MSI_VEC_BITS);
                if (rc)
                        return rc;
                msi_nr++;
        }

        rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
        if (rc) {
                clear_bit(aisb, bucket->alloc);
                dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
                return rc;
        }
        return (zdev->irq_map->msi_vecs == msi_vecs) ?
                0 : zdev->irq_map->msi_vecs;
}

static void zpci_teardown_msi(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = get_zdev(pdev);
        struct msi_desc *msi;
        int aisb, rc;

        rc = zpci_unregister_airq(zdev);
        if (rc) {
                dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
                return;
        }

        msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
        aisb = irq_to_dev_nr(msi->irq);

        list_for_each_entry(msi, &pdev->msi_list, list)
                zpci_teardown_msi_irq(zdev, msi);

        clear_bit(aisb, bucket->alloc);
        if (aisb + 1 == aisb_max)
                aisb_max--;
}

int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
        if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
                return -EINVAL;
        return zpci_setup_msi(pdev, nvec);
}

void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
        pr_info("%s: on pdev: %p\n", __func__, pdev);
        zpci_teardown_msi(pdev);
}

static void zpci_map_resources(struct zpci_dev *zdev)
{
        struct pci_dev *pdev = zdev->pdev;
        resource_size_t len;
        int i;

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
                pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
                pdev->resource[i].end = pdev->resource[i].start + len - 1;
                pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
                         i, pdev->resource[i].start, pdev->resource[i].end);
        }
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
        resource_size_t len;
        int i;

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
                pci_iounmap(pdev, (void *) pdev->resource[i].start);
        }
}

struct zpci_dev *zpci_alloc_device(void)
{
        struct zpci_dev *zdev;

        /* Alloc memory for our private pci device data */
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
        if (!zdev)
                return ERR_PTR(-ENOMEM);

        /* Alloc aibv & callback space */
        zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
        if (!zdev->irq_map)
                goto error;
        WARN_ON((u64) zdev->irq_map & 0xff);
        return zdev;

error:
        kfree(zdev);
        return ERR_PTR(-ENOMEM);
}

void zpci_free_device(struct zpci_dev *zdev)
{
        kmem_cache_free(zdev_irq_cache, zdev->irq_map);
        kfree(zdev);
}

/* Called on removal of pci_dev, leaves zpci and bus device */
static void zpci_remove_device(struct pci_dev *pdev)
{
        struct zpci_dev *zdev = get_zdev(pdev);

        dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
        zdev->state = ZPCI_FN_STATE_CONFIGURED;
        zpci_dma_exit_device(zdev);
        zpci_fmb_disable_device(zdev);
        zpci_sysfs_remove_device(&pdev->dev);
        zpci_unmap_resources(pdev);
        list_del(&zdev->entry);         /* can be called from init */
        zdev->pdev = NULL;
}

static void zpci_scan_devices(void)
{
        struct zpci_dev *zdev;

        mutex_lock(&zpci_list_lock);
        list_for_each_entry(zdev, &zpci_list, entry)
                if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
                        zpci_scan_device(zdev);
        mutex_unlock(&zpci_list_lock);
}

/*
 * Too late for any s390 specific setup, since interrupts must be set up
 * already which requires DMA setup too and the pci scan will access the
 * config space, which only works if the function handle is enabled.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
        struct resource *res;
        u16 cmd;
        int i;

        pci_read_config_word(pdev, PCI_COMMAND, &cmd);

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                res = &pdev->resource[i];

                if (res->flags & IORESOURCE_IO)
                        return -EINVAL;

                if (res->flags & IORESOURCE_MEM)
                        cmd |= PCI_COMMAND_MEMORY;
        }
        pci_write_config_word(pdev, PCI_COMMAND, cmd);
        return 0;
}

void pcibios_disable_device(struct pci_dev *pdev)
{
        zpci_remove_device(pdev);
        pdev->sysdata = NULL;
}

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
        return zpci_sysfs_add_device(&pdev->dev);
}

int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
        int msi_nr = irq_to_msi_nr(irq);
        struct zdev_irq_map *imap;
        struct msi_desc *msi;

        msi = irq_get_msi_desc(irq);
        if (!msi)
                return -EIO;

        imap = get_imap(irq);
        spin_lock_init(&imap->lock);

        pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
        imap->cb[msi_nr].handler = handler;
        imap->cb[msi_nr].data = data;

        /*
         * The generic MSI code returns with the interrupt disabled on the
         * card, using the MSI mask bits. Firmware doesn't appear to unmask
         * at that level, so we do it here by hand.
         */
        zpci_msi_set_mask_bits(msi, 1, 0);
        return 0;
}

void zpci_free_irq(unsigned int irq)
{
        struct zdev_irq_map *imap = get_imap(irq);
        int msi_nr = irq_to_msi_nr(irq);
        unsigned long flags;

        pr_debug("%s: for irq: %d\n", __func__, irq);

        spin_lock_irqsave(&imap->lock, flags);
        imap->cb[msi_nr].handler = NULL;
        imap->cb[msi_nr].data = NULL;
        spin_unlock_irqrestore(&imap->lock, flags);
}

int request_irq(unsigned int irq, irq_handler_t handler,
                unsigned long irqflags, const char *devname, void *dev_id)
{
        pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
                 __func__, irq, handler, irqflags, devname);

        return zpci_request_irq(irq, handler, dev_id);
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
        zpci_free_irq(irq);
}
EXPORT_SYMBOL_GPL(free_irq);

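/*
 * IRQ setup: allocate the summary bit page and the summary bit
 * allocation bitmap, register an adapter interrupt handler for PCI_ISC
 * and permanently set the local summary indicator to 1 so the handler
 * is called for every adapter interrupt on this ISC. Finally the ISC
 * is enabled with the SIC instruction.
 */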
static int __init zpci_irq_init(void)
{
        int cpu, rc;

        bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
        if (!bucket)
                return -ENOMEM;

        bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!bucket->aisb) {
                rc = -ENOMEM;
                goto out_aisb;
        }

        bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!bucket->alloc) {
                rc = -ENOMEM;
                goto out_alloc;
        }

        isc_register(PCI_ISC);
        zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
        if (IS_ERR(zpci_irq_si)) {
                rc = PTR_ERR(zpci_irq_si);
                zpci_irq_si = NULL;
                goto out_ai;
        }

        for_each_online_cpu(cpu)
                per_cpu(next_sbit, cpu) = 0;

        spin_lock_init(&bucket->lock);
        /* set summary to 1 to be called every time for the ISC */
        *zpci_irq_si = 1;
        sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
        return 0;

out_ai:
        isc_unregister(PCI_ISC);
        free_page((unsigned long) bucket->alloc);
out_alloc:
        free_page((unsigned long) bucket->aisb);
out_aisb:
        kfree(bucket);
        return rc;
}

static void zpci_irq_exit(void)
{
        free_page((unsigned long) bucket->alloc);
        free_page((unsigned long) bucket->aisb);
        s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
        isc_unregister(PCI_ISC);
        kfree(bucket);
}

void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m)
{
        if (!zdev)
                return;

        seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries));
        seq_printf(m, "aibv[0]:%016lx aibv[1]:%016lx aisb:%016lx\n",
                   get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb);
}

static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
                                                unsigned long flags, int domain)
{
        struct resource *r;
        char *name;
        int rc;

        r = kzalloc(sizeof(*r), GFP_KERNEL);
        if (!r)
                return ERR_PTR(-ENOMEM);
        r->start = start;
        r->end = r->start + size - 1;
        r->flags = flags;
        r->parent = &iomem_resource;
        name = kmalloc(18, GFP_KERNEL);
        if (!name) {
                kfree(r);
                return ERR_PTR(-ENOMEM);
        }
        sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
        r->name = name;

        rc = request_resource(&iomem_resource, r);
        if (rc)
                pr_debug("request resource %pR failed\n", r);
        return r;
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
        int entry;

        spin_lock(&zpci_iomap_lock);
        entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
        if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
                spin_unlock(&zpci_iomap_lock);
                return -ENOSPC;
        }
        set_bit(entry, zpci_iomap);
        spin_unlock(&zpci_iomap_lock);
        return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
        spin_lock(&zpci_iomap_lock);
        memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
        clear_bit(entry, zpci_iomap);
        spin_unlock(&zpci_iomap_lock);
}

static int zpci_create_device_bus(struct zpci_dev *zdev)
{
        struct resource *res;
        LIST_HEAD(resources);
        int i;

        /* allocate mapping entry for each used bar */
        for (i = 0; i < PCI_BAR_COUNT; i++) {
                unsigned long addr, size, flags;
                int entry;

                if (!zdev->bars[i].size)
                        continue;
                entry = zpci_alloc_iomap(zdev);
                if (entry < 0)
                        return entry;
                zdev->bars[i].map_idx = entry;

                /* only MMIO is supported */
                flags = IORESOURCE_MEM;
                if (zdev->bars[i].val & 8)
                        flags |= IORESOURCE_PREFETCH;
                if (zdev->bars[i].val & 4)
                        flags |= IORESOURCE_MEM_64;

                addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

                size = 1UL << zdev->bars[i].size;

                res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
                if (IS_ERR(res)) {
                        zpci_free_iomap(zdev, entry);
                        return PTR_ERR(res);
                }
                pci_add_resource(&resources, res);
        }

        zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
                                        zdev, &resources);
        if (!zdev->bus)
                return -EIO;

        zdev->bus->max_bus_speed = zdev->max_bus_speed;
        return 0;
}

static int zpci_alloc_domain(struct zpci_dev *zdev)
{
        spin_lock(&zpci_domain_lock);
        zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
        if (zdev->domain == ZPCI_NR_DEVICES) {
                spin_unlock(&zpci_domain_lock);
                return -ENOSPC;
        }
        set_bit(zdev->domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
        return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
        spin_lock(&zpci_domain_lock);
        clear_bit(zdev->domain, zpci_domain);
        spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
        int rc;

        rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
        if (rc)
                goto out;
        pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);

        rc = zpci_dma_init_device(zdev);
        if (rc)
                goto out_dma;
        return 0;

out_dma:
        clp_disable_fh(zdev);
out:
        return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

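/*
 * zpci_create_device() makes a detected function known to the system:
 * it allocates a domain, creates the per-function root bus, adds the
 * function to zpci_list and notifies the hotplug code. Functions in
 * standby state are left disabled; configured functions are enabled
 * right away (function handle and DMA setup via zpci_enable_device()).
 */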
int zpci_create_device(struct zpci_dev *zdev)
{
        int rc;

        rc = zpci_alloc_domain(zdev);
        if (rc)
                goto out;

        rc = zpci_create_device_bus(zdev);
        if (rc)
                goto out_bus;

        mutex_lock(&zpci_list_lock);
        list_add_tail(&zdev->entry, &zpci_list);
        if (hotplug_ops)
                hotplug_ops->create_slot(zdev);
        mutex_unlock(&zpci_list_lock);

        if (zdev->state == ZPCI_FN_STATE_STANDBY)
                return 0;

        rc = zpci_enable_device(zdev);
        if (rc)
                goto out_start;
        return 0;

out_start:
        mutex_lock(&zpci_list_lock);
        list_del(&zdev->entry);
        if (hotplug_ops)
                hotplug_ops->remove_slot(zdev);
        mutex_unlock(&zpci_list_lock);
out_bus:
        zpci_free_domain(zdev);
out:
        return rc;
}

void zpci_stop_device(struct zpci_dev *zdev)
{
        zpci_dma_exit_device(zdev);
        /*
         * Note: SCLP disables fh via set-pci-fn so don't
         * do that here.
         */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);

int zpci_scan_device(struct zpci_dev *zdev)
{
        zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
        if (!zdev->pdev) {
                pr_err("pci_scan_single_device failed for fid: 0x%x\n",
                       zdev->fid);
                goto out;
        }

        zpci_debug_init_device(zdev);
        zpci_fmb_enable_device(zdev);
        zpci_map_resources(zdev);
        pci_bus_add_devices(zdev->bus);

        /* now that pdev was added to the bus mark it as used */
        zdev->state = ZPCI_FN_STATE_ONLINE;
        return 0;

out:
        zpci_dma_exit_device(zdev);
        clp_disable_fh(zdev);
        return -EIO;
}
EXPORT_SYMBOL_GPL(zpci_scan_device);

static inline int barsize(u8 size)
{
        return (size) ? (1 << size) >> 10 : 0;
}

static int zpci_mem_init(void)
{
        zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
                                           L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
        if (!zdev_irq_cache)
                goto error_zdev;

        zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
                                           16, 0, NULL);
        if (!zdev_fmb_cache)
                goto error_fmb;

        /* TODO: use realloc */
        zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
                                   GFP_KERNEL);
        if (!zpci_iomap_start)
                goto error_iomap;
        return 0;

error_iomap:
        kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
        kmem_cache_destroy(zdev_irq_cache);
error_zdev:
        return -ENOMEM;
}

static void zpci_mem_exit(void)
{
        kfree(zpci_iomap_start);
        kmem_cache_destroy(zdev_irq_cache);
        kmem_cache_destroy(zdev_fmb_cache);
}

void zpci_register_hp_ops(struct pci_hp_callback_ops *ops)
{
        mutex_lock(&zpci_list_lock);
        hotplug_ops = ops;
        mutex_unlock(&zpci_list_lock);
}
EXPORT_SYMBOL_GPL(zpci_register_hp_ops);

void zpci_deregister_hp_ops(void)
{
        mutex_lock(&zpci_list_lock);
        hotplug_ops = NULL;
        mutex_unlock(&zpci_list_lock);
}
EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);

unsigned int s390_pci_probe = 1;
EXPORT_SYMBOL_GPL(s390_pci_probe);

char * __init pcibios_setup(char *str)
{
        if (!strcmp(str, "off")) {
                s390_pci_probe = 0;
                return NULL;
        }
        return str;
}

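/*
 * Initialization order: debug facilities, caches and the iomap table,
 * the MSI hash, adapter interrupts, DMA and finally the CLP scan for
 * PCI functions. Each step is unwound in reverse order if a later step
 * fails.
 */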
static int __init pci_base_init(void)
{
        int rc;

        if (!s390_pci_probe)
                return 0;

        if (!test_facility(2) || !test_facility(69)
            || !test_facility(71) || !test_facility(72))
                return 0;

        pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
                test_facility(69), test_facility(70),
                test_facility(71));

        rc = zpci_debug_init();
        if (rc)
                return rc;

        rc = zpci_mem_init();
        if (rc)
                goto out_mem;

        rc = zpci_msihash_init();
        if (rc)
                goto out_hash;

        rc = zpci_irq_init();
        if (rc)
                goto out_irq;

        rc = zpci_dma_init();
        if (rc)
                goto out_dma;

        rc = clp_find_pci_devices();
        if (rc)
                goto out_find;

        zpci_scan_devices();
        return 0;

out_find:
        zpci_dma_exit();
out_dma:
        zpci_irq_exit();
out_irq:
        zpci_msihash_exit();
out_hash:
        zpci_mem_exit();
out_mem:
        zpci_debug_exit();
        return rc;
}
subsys_initcall(pci_base_init);