/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME	"ntb_hw_intel"
#define NTB_DESC	"Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
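
/*
 * Worked example (illustrative): ((bar) << 2) steps four bytes per bar
 * index, so bar0_off(base, 2) == base + 8, and bar2_off(base, 4) ==
 * bar0_off(base, 2) == base + 8 for register blocks that begin at bar 2.
 */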

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
		 "value of zero or positive starts from first mw idx, and a "
		 "negative value starts from last mw idx. Both sides MUST "
		 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
		 "ntb so that the peer ntb only occupies the first half of "
		 "the mw, so the second half can still be used as a mw. Both "
		 "sides MUST set the same value here!");

static const struct intel_ntb_reg bwd_reg;
static const struct intel_ntb_alt_reg bwd_pri_reg;
static const struct intel_ntb_alt_reg bwd_sec_reg;
static const struct intel_ntb_alt_reg bwd_b2b_reg;
static const struct intel_ntb_xlat_reg bwd_pri_xlat;
static const struct intel_ntb_xlat_reg bwd_sec_xlat;
static const struct intel_ntb_reg snb_reg;
static const struct intel_ntb_alt_reg snb_pri_reg;
static const struct intel_ntb_alt_reg snb_sec_reg;
static const struct intel_ntb_alt_reg snb_b2b_reg;
static const struct intel_ntb_xlat_reg snb_pri_xlat;
static const struct intel_ntb_xlat_reg snb_sec_xlat;
static const struct intel_b2b_addr snb_b2b_usd_addr;
static const struct intel_b2b_addr snb_b2b_dsd_addr;

static const struct ntb_dev_ops intel_ntb_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif

static inline int pdev_is_bwd(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
		return 1;
	}
	return 0;
}

static inline int pdev_is_snb(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
		return 1;
	}
	return 0;
}

static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
	ndev->unsafe_flags = 0;
	ndev->unsafe_flags_ignore = 0;

	/* Only B2B has a workaround to avoid SDOORBELL */
	if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
		if (!ntb_topo_is_b2b(ndev->ntb.topo))
			ndev->unsafe_flags |= NTB_UNSAFE_DB;

	/* No low level workaround to avoid SB01BASE */
	if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
		ndev->unsafe_flags |= NTB_UNSAFE_DB;
		ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
	}
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
				 unsigned long flag)
{
	return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
				     unsigned long flag)
{
	flag &= ndev->unsafe_flags;
	ndev->unsafe_flags_ignore |= flag;

	return !!flag;
}

static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx >= ndev->mw_count)
		return -EINVAL;
	return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
			       phys_addr_t *db_addr, resource_size_t *db_size,
			       phys_addr_t reg_addr, unsigned long reg)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	if (db_addr) {
		*db_addr = reg_addr + reg;
		dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
	}

	if (db_size) {
		*db_size = ndev->reg->db_size;
		dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
	}

	return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
			       void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
				void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	ndev->reg->db_iowrite(db_bits, mmio);

	return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				   void __iomem *mmio)
{
	unsigned long irqflags;

	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask |= db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				     void __iomem *mmio)
{
	unsigned long irqflags;

	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask &= ~db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

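/*
 * Typical client-side doorbell sequence (hypothetical, via the generic
 * ntb.h API): mask a doorbell while servicing it, then clear and unmask:
 *
 *	db = ntb_db_read(ntb);
 *	ntb_db_set_mask(ntb, db);
 *	...service the doorbell bits...
 *	ntb_db_clear(ntb, db);
 *	ntb_db_clear_mask(ntb, db);
 */
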
static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
	u64 shift, mask;

	shift = ndev->db_vec_shift;
	mask = BIT_ULL(shift) - 1;

	return mask << (shift * db_vector);
}
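
/*
 * Worked example (illustrative): with db_vec_shift == 4, each vector owns
 * four doorbell bits; ndev_vec_mask(ndev, 0) == 0x000f,
 * ndev_vec_mask(ndev, 1) == 0x00f0, and ndev_vec_mask(ndev, 3) == 0xf000.
 */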

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
				 phys_addr_t *spad_addr, phys_addr_t reg_addr,
				 unsigned long reg)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	if (spad_addr) {
		*spad_addr = reg_addr + reg + (idx << 2);
		dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
	}

	return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
				 void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
				  void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	iowrite32(val, mmio + (idx << 2));

	return 0;
}

static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
	u64 vec_mask;

	vec_mask = ndev_vec_mask(ndev, vec);

	dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);

	ndev->last_ts = jiffies;

	if (vec_mask & ndev->db_link_mask) {
		if (ndev->reg->poll_link(ndev))
			ntb_link_event(&ndev->ntb);
	}

	if (vec_mask & ndev->db_valid_mask)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct intel_ntb_vec *nvec = dev;

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct intel_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

static int ndev_init_isr(struct intel_ntb_dev *ndev,
			 int msix_min, int msix_max,
			 int msix_shift, int total_shift)
{
	struct pci_dev *pdev;
	int rc, i, msix_count;

	pdev = ndev_pdev(ndev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	/* Try to set up msix irq */

	ndev->vec = kcalloc(msix_max, sizeof(*ndev->vec), GFP_KERNEL);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc(msix_max, sizeof(*ndev->msix), GFP_KERNEL);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
	ndev->db_vec_count = msix_count;
	ndev->db_vec_shift = msix_shift;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */

	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */

	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int i;

	pdev = ndev_pdev(ndev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	if (ndev->msix) {
		i = ndev->db_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
	}
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
	off += scnprintf(buf + off, buf_size - off,
			 "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
	off += scnprintf(buf + off, buf_size - off,
			 "BAR4 Split -\t\t%s\n",
			 ndev->bar4_split ? "yes" : "no");

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT45 -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT45 -\t\t\t%#018llx\n", u.v64);

	if (pdev_is_snb(ndev->ntb.pdev)) {
		if (ntb_topo_is_b2b(ndev->ntb.topo)) {
			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Outgoing B2B XLAT:\n");

			u.v64 = ioread64(mmio + SNB_PBAR23XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + SNB_PBAR45XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT45 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + SNB_PBAR23LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + SNB_PBAR45LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT45 -\t\t%#018llx\n", u.v64);

			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Secondary BAR:\n");

			u.v64 = ioread64(mmio + SNB_SBAR0BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR01 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + SNB_SBAR23BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + SNB_SBAR45BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR45 -\t\t%#018llx\n", u.v64);
		}

		off += scnprintf(buf + off, buf_size - off,
				 "\nSNB NTB Statistics:\n");

		u.v16 = ioread16(mmio + SNB_USMEMMISS_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "Upstream Memory Miss -\t%u\n", u.v16);

		off += scnprintf(buf + off, buf_size - off,
				 "\nSNB NTB Hardware Errors:\n");

		if (!pci_read_config_word(ndev->ntb.pdev,
					  SNB_DEVSTS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "DEVSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_word(ndev->ntb.pdev,
					  SNB_LINK_STATUS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "LNKSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_dword(ndev->ntb.pdev,
					   SNB_UNCERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "UNCERRSTS -\t\t%#06x\n", u.v32);

		if (!pci_read_config_dword(ndev->ntb.pdev,
					   SNB_CORERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "CORERRSTS -\t\t%#06x\n", u.v32);
	}

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(ndev_name(ndev), debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &intel_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->mw_count;
}

static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
				  phys_addr_t *base,
				  resource_size_t *size,
				  resource_size_t *align,
				  resource_size_t *align_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar) +
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar) -
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (align)
		*align = pci_resource_len(ndev->ntb.pdev, bar);

	if (align_size)
		*align_size = 1;

	return 0;
}
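
/*
 * Illustrative client-side usage (hypothetical, via the generic ntb.h API):
 * a client sizes and aligns its buffer from ntb_mw_get_range() before
 * programming the translation with ntb_mw_set_trans(), e.g.:
 *
 *	ntb_mw_get_range(ntb, 0, &base, &size, &align, &align_size);
 *	buf = dma_alloc_coherent(&pdev->dev, size, &dma_addr, GFP_KERNEL);
 *	if (buf && !(dma_addr & (align - 1)))
 *		rc = ntb_mw_set_trans(ntb, 0, dma_addr, size);
 */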

static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
				  dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long base_reg, xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

	if (bar < 4 || !ndev->bar4_split) {
		base = ioread64(mmio + base_reg);

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite64(addr, mmio + xlat_reg);
		reg_val = ioread64(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite64(limit, mmio + limit_reg);
		reg_val = ioread64(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}
	} else {
		/* split bar addr range must all be 32 bit */
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		base = ioread32(mmio + base_reg);

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite32(addr, mmio + xlat_reg);
		reg_val = ioread32(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite32(limit, mmio + limit_reg);
		reg_val = ioread32(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite32(base, mmio + limit_reg);
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}

static int intel_ntb_link_is_up(struct ntb_dev *ntb,
				enum ntb_speed *speed,
				enum ntb_width *width)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (ndev->reg->link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
		return 1;
	} else {
		/* TODO MAYBE: is it possible to observe the link speed and
		 * width while link is training? */
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;
		return 0;
	}
}

static int intel_ntb_link_enable(struct ntb_dev *ntb,
				 enum ntb_speed max_speed,
				 enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev),
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);
	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	if (ndev->bar4_split)
		ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev), "Disabling link\n");

	/* Bring NTB link down */
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
	if (ndev->bar4_split)
		ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
	ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->db_vec_count;
}

static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector > ndev->db_vec_count)
		return 0;

	return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_bell);
}

static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_set_mask(ndev, db_bits,
				ndev->self_mmio +
				ndev->self_reg->db_mask);
}

static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_clear_mask(ndev, db_bits,
				  ndev->self_mmio +
				  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
				  phys_addr_t *db_addr,
				  resource_size_t *db_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
			    ndev->peer_reg->db_bell);
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->peer_mmio +
			     ndev->peer_reg->db_bell);
}

static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

static int intel_ntb_spad_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->spad_count;
}

static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->self_mmio +
			      ndev->self_reg->spad);
}

static int intel_ntb_spad_write(struct ntb_dev *ntb,
				int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->self_mmio +
			       ndev->self_reg->spad);
}

static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *spad_addr)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
			      ndev->peer_reg->spad);
}

static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->peer_mmio +
			      ndev->peer_reg->spad);
}

static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
				     int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->peer_mmio +
			       ndev->peer_reg->spad);
}

/* BWD */

static u64 bwd_db_ioread(void __iomem *mmio)
{
	return ioread64(mmio);
}

static void bwd_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite64(bits, mmio);
}

static int bwd_poll_link(struct intel_ntb_dev *ndev)
{
	u32 ntb_ctl;

	ntb_ctl = ioread32(ndev->self_mmio + BWD_NTBCNTL_OFFSET);

	if (ntb_ctl == ndev->ntb_ctl)
		return 0;

	ndev->ntb_ctl = ntb_ctl;

	ndev->lnk_sta = ioread32(ndev->self_mmio + BWD_LINK_STATUS_OFFSET);

	return 1;
}

static int bwd_link_is_up(struct intel_ntb_dev *ndev)
{
	return BWD_NTB_CTL_ACTIVE(ndev->ntb_ctl);
}

static int bwd_link_is_err(struct intel_ntb_dev *ndev)
{
	if (ioread32(ndev->self_mmio + BWD_LTSSMSTATEJMP_OFFSET)
	    & BWD_LTSSMSTATEJMP_FORCEDETECT)
		return 1;

	if (ioread32(ndev->self_mmio + BWD_IBSTERRRCRVSTS0_OFFSET)
	    & BWD_IBIST_ERR_OFLOW)
		return 1;

	return 0;
}

static inline enum ntb_topo bwd_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
	switch (ppd & BWD_PPD_TOPO_MASK) {
	case BWD_PPD_TOPO_B2B_USD:
		dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
		return NTB_TOPO_B2B_USD;

	case BWD_PPD_TOPO_B2B_DSD:
		dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
		return NTB_TOPO_B2B_DSD;

	case BWD_PPD_TOPO_PRI_USD:
	case BWD_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
	case BWD_PPD_TOPO_SEC_USD:
	case BWD_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
		return NTB_TOPO_NONE;
	}

	dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
	return NTB_TOPO_NONE;
}

static void bwd_link_hb(struct work_struct *work)
{
	struct intel_ntb_dev *ndev = hb_ndev(work);
	unsigned long poll_ts;
	void __iomem *mmio;
	u32 status32;

	poll_ts = ndev->last_ts + BWD_LINK_HB_TIMEOUT;

	/* Delay polling the link status if an interrupt was received,
	 * unless the cached link status says the link is down.
	 */
	if (time_after(poll_ts, jiffies) && bwd_link_is_up(ndev)) {
		schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
		return;
	}

	if (bwd_poll_link(ndev))
		ntb_link_event(&ndev->ntb);

	if (bwd_link_is_up(ndev) || !bwd_link_is_err(ndev)) {
		schedule_delayed_work(&ndev->hb_timer, BWD_LINK_HB_TIMEOUT);
		return;
	}

	/* Link is down with error: recover the link! */

	mmio = ndev->self_mmio;

	/* Driver resets the NTB ModPhy lanes - magic! */
	iowrite8(0xe0, mmio + BWD_MODPHY_PCSREG6);
	iowrite8(0x40, mmio + BWD_MODPHY_PCSREG4);
	iowrite8(0x60, mmio + BWD_MODPHY_PCSREG4);
	iowrite8(0x60, mmio + BWD_MODPHY_PCSREG6);

	/* Driver waits 100ms to allow the NTB ModPhy to settle */
	msleep(100);

	/* Clear AER Errors, write to clear */
	status32 = ioread32(mmio + BWD_ERRCORSTS_OFFSET);
	dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
	status32 &= PCI_ERR_COR_REP_ROLL;
	iowrite32(status32, mmio + BWD_ERRCORSTS_OFFSET);

	/* Clear unexpected electrical idle event in LTSSM, write to clear */
	status32 = ioread32(mmio + BWD_LTSSMERRSTS0_OFFSET);
	dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
	status32 |= BWD_LTSSMERRSTS0_UNEXPECTEDEI;
	iowrite32(status32, mmio + BWD_LTSSMERRSTS0_OFFSET);

	/* Clear DeSkew Buffer error, write to clear */
	status32 = ioread32(mmio + BWD_DESKEWSTS_OFFSET);
	dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
	status32 |= BWD_DESKEWSTS_DBERR;
	iowrite32(status32, mmio + BWD_DESKEWSTS_OFFSET);

	status32 = ioread32(mmio + BWD_IBSTERRRCRVSTS0_OFFSET);
	dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
	status32 &= BWD_IBIST_ERR_OFLOW;
	iowrite32(status32, mmio + BWD_IBSTERRRCRVSTS0_OFFSET);

	/* Releases the NTB state machine to allow the link to retrain */
	status32 = ioread32(mmio + BWD_LTSSMSTATEJMP_OFFSET);
	dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
	status32 &= ~BWD_LTSSMSTATEJMP_FORCEDETECT;
	iowrite32(status32, mmio + BWD_LTSSMSTATEJMP_OFFSET);

	/* There is a potential race between the 2 NTB devices recovering at the
	 * same time.  If the times are the same, the link will not recover and
	 * the driver will be stuck in this loop forever.  Add a random interval
	 * to the recovery time to prevent this race.
	 */
	schedule_delayed_work(&ndev->hb_timer, BWD_LINK_RECOVERY_TIME
			      + prandom_u32() % BWD_LINK_RECOVERY_TIME);
}

static int bwd_init_isr(struct intel_ntb_dev *ndev)
{
	int rc;

	rc = ndev_init_isr(ndev, 1, BWD_DB_MSIX_VECTOR_COUNT,
			   BWD_DB_MSIX_VECTOR_SHIFT, BWD_DB_TOTAL_SHIFT);
	if (rc)
		return rc;

	/* BWD doesn't have link status interrupt, poll on that platform */
	ndev->last_ts = jiffies;
	INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_hb);
	schedule_delayed_work(&ndev->hb_timer, BWD_LINK_HB_TIMEOUT);

	return 0;
}

static void bwd_deinit_isr(struct intel_ntb_dev *ndev)
{
	cancel_delayed_work_sync(&ndev->hb_timer);
	ndev_deinit_isr(ndev);
}

static int bwd_init_ntb(struct intel_ntb_dev *ndev)
{
	ndev->mw_count = BWD_MW_COUNT;
	ndev->spad_count = BWD_SPAD_COUNT;
	ndev->db_count = BWD_DB_COUNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &bwd_pri_reg;
		ndev->peer_reg = &bwd_b2b_reg;
		ndev->xlat_reg = &bwd_sec_xlat;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + BWD_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	return 0;
}

static int bwd_init_dev(struct intel_ntb_dev *ndev)
{
	u32 ppd;
	int rc;

	rc = pci_read_config_dword(ndev->ntb.pdev, BWD_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = bwd_ppd_topo(ndev, ppd);
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	rc = bwd_init_ntb(ndev);
	if (rc)
		return rc;

	rc = bwd_init_isr(ndev);
	if (rc)
		return rc;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		/* Initiate PCI-E link training */
		rc = pci_write_config_dword(ndev->ntb.pdev, BWD_PPD_OFFSET,
					    ppd | BWD_PPD_INIT_LINK);
		if (rc)
			return rc;
	}

	return 0;
}

static void bwd_deinit_dev(struct intel_ntb_dev *ndev)
{
	bwd_deinit_isr(ndev);
}

/* SNB */

static u64 snb_db_ioread(void __iomem *mmio)
{
	return (u64)ioread16(mmio);
}

static void snb_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite16((u16)bits, mmio);
}

static int snb_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_bell);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  SNB_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

static int snb_link_is_up(struct intel_ntb_dev *ndev)
{
	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return 1;

	return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}

static inline enum ntb_topo snb_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
	switch (ppd & SNB_PPD_TOPO_MASK) {
	case SNB_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;

	case SNB_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;

	case SNB_PPD_TOPO_PRI_USD:
	case SNB_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
		return NTB_TOPO_PRI;

	case SNB_PPD_TOPO_SEC_USD:
	case SNB_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		return NTB_TOPO_SEC;
	}

	return NTB_TOPO_NONE;
}

static inline int snb_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
	if (ppd & SNB_PPD_SPLIT_BAR_MASK) {
		dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
		return 1;
	}
	return 0;
}

static int snb_init_isr(struct intel_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, SNB_DB_MSIX_VECTOR_COUNT,
			     SNB_DB_MSIX_VECTOR_COUNT,
			     SNB_DB_MSIX_VECTOR_SHIFT,
			     SNB_DB_TOTAL_SHIFT);
}

static void snb_deinit_isr(struct intel_ntb_dev *ndev)
{
	ndev_deinit_isr(ndev);
}

static int snb_setup_b2b_mw(struct intel_ntb_dev *ndev,
			    const struct intel_b2b_addr *addr,
			    const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev_pdev(ndev);
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx >= ndev->mw_count) {
		dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && SNB_B2B_MIN_SIZE <= bar_size >> 1) {
			dev_dbg(ndev_dev(ndev),
				"b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (SNB_B2B_MIN_SIZE <= bar_size) {
			dev_dbg(ndev_dev(ndev),
				"b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(ndev_dev(ndev),
				"b2b bar size is too small\n");
			return -EIO;
		}
	}

	/* Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.  The
	 * size registers hold the log2 of the bar size, so subtracting one
	 * halves the bar.
	 *
	 * Note: code for each specific bar size register, because the register
	 * offsets are not in a consistent order (bar5sz comes after ppd, odd).
	 */
	pci_read_config_byte(pdev, SNB_PBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}
	pci_write_config_byte(pdev, SNB_SBAR23SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, SNB_SBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);

	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, SNB_PBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, SNB_SBAR45SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, SNB_SBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
	} else {
		pci_read_config_byte(pdev, SNB_PBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, SNB_SBAR4SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, SNB_SBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);

		pci_read_config_byte(pdev, SNB_PBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
		if (b2b_bar == 5) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, SNB_SBAR5SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, SNB_SBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
	}

	/* SBAR01 hit by first part of the b2b bar */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = addr->bar5_addr32;
	else
		return -EIO;

	dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
	iowrite64(bar_addr, mmio + SNB_SBAR0BASE_OFFSET);

	/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
	 * The b2b bar is either disabled above, or configured half-size, and
	 * it starts at the PBAR xlat + offset.
	 */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + SNB_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + SNB_SBAR23BASE_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + SNB_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + SNB_SBAR45BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR4BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR5BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
	}

	/* setup incoming bar limits == base addrs (zero length windows) */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + SNB_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + SNB_SBAR23LMT_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + SNB_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + SNB_SBAR45LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR4LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR5LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr);
	}

	/* zero incoming translation addrs */
	iowrite64(0, mmio + SNB_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + SNB_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + SNB_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + SNB_SBAR5XLAT_OFFSET);
	}

	/* zero outgoing translation limits (whole bar size windows) */
	iowrite64(0, mmio + SNB_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + SNB_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + SNB_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + SNB_PBAR5LMT_OFFSET);
	}

	/* set outgoing translation offsets */
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + SNB_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + SNB_PBAR23XLAT_OFFSET);
	dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + SNB_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + SNB_PBAR45XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + SNB_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + SNB_PBAR4XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + SNB_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + SNB_PBAR5XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
	}

	/* set the translation offset for b2b registers */
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

	/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
	dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + SNB_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + SNB_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
		/* map peer ntb mmio config space registers */
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    SNB_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;
	}

	return 0;
}

static int snb_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;
	u32 ntb_ctl;

	if (ndev->bar4_split)
		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
	else
		ndev->mw_count = SNB_MW_COUNT;

	ndev->spad_count = SNB_SPAD_COUNT;
	ndev->db_count = SNB_DB_COUNT;
	ndev->db_link_mask = SNB_DB_LINK_BIT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
			return -EINVAL;
		}

		/* enable link to allow secondary side device to appear */
		ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
		ntb_ctl &= ~NTB_CTL_DISABLE;
		iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &snb_pri_reg;
		ndev->peer_reg = &snb_sec_reg;
		ndev->xlat_reg = &snb_sec_xlat;
		break;

	case NTB_TOPO_SEC:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
			return -EINVAL;
		}
		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &snb_sec_reg;
		ndev->peer_reg = &snb_pri_reg;
		ndev->xlat_reg = &snb_pri_xlat;
		break;

	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &snb_pri_reg;
		ndev->peer_reg = &snb_b2b_reg;
		ndev->xlat_reg = &snb_sec_xlat;

		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			ndev->peer_reg = &snb_pri_reg;

			if (b2b_mw_idx < 0)
				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
			else
				ndev->b2b_idx = b2b_mw_idx;

			dev_dbg(ndev_dev(ndev),
				"setting up b2b mw idx %d means %d\n",
				b2b_mw_idx, ndev->b2b_idx);

		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
			dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
			ndev->db_count -= 1;
		}

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = snb_setup_b2b_mw(ndev,
					      &snb_b2b_dsd_addr,
					      &snb_b2b_usd_addr);
		} else {
			rc = snb_setup_b2b_mw(ndev,
					      &snb_b2b_usd_addr,
					      &snb_b2b_dsd_addr);
		}
		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + SNB_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

static int snb_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc, mem;

	/* There is a Xeon hardware errata related to writes to SDOORBELL or
	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
	 * which may hang the system.  To workaround this use the second memory
	 * window to access the interrupt and scratch pad registers on the
	 * remote system.
	 */
	ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;

	/* There is a hardware errata related to accessing any register in
	 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
	 */
	ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;

	/* HW Errata on bit 14 of b2bdoorbell register.  Writes will not be
	 * mirrored to the remote system.  Shrink the number of bits by one,
	 * since bit 14 is the last bit.
	 */
	ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;

	ndev->reg = &snb_reg;

	pdev = ndev_pdev(ndev);

	rc = pci_read_config_byte(pdev, SNB_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = snb_ppd_topo(ndev, ppd);
	dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		ndev->bar4_split = snb_ppd_bar4_split(ndev, ppd);
		dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
			ppd, ndev->bar4_split);
	} else {
		/* This is a way for transparent BAR to figure out if we are
		 * doing split BAR or not.  There is no way for the hw on the
		 * transparent side to know and set the PPD.
		 */
		mem = pci_select_bars(pdev, IORESOURCE_MEM);
		ndev->bar4_split = hweight32(mem) ==
			HSX_SPLIT_BAR_MW_COUNT + 1;
		dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
			mem, ndev->bar4_split);
	}

	rc = snb_init_ntb(ndev);
	if (rc)
		return rc;

	return snb_init_isr(ndev);
}

static void snb_deinit_dev(struct intel_ntb_dev *ndev)
{
	snb_deinit_isr(ndev);
}

static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_mmio;
	}
	ndev->peer_mmio = ndev->self_mmio;

	return 0;

err_mmio:
err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev_pdev(ndev);

	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
		pci_iounmap(pdev, ndev->peer_mmio);
	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &intel_ntb_ops;

	ndev->b2b_off = 0;
	ndev->b2b_idx = INT_MAX;

	ndev->bar4_split = 0;

	ndev->mw_count = 0;
	ndev->spad_count = 0;
	ndev->db_count = 0;
	ndev->db_vec_count = 0;
	ndev->db_vec_shift = 0;

	ndev->ntb_ctl = 0;
	ndev->lnk_sta = 0;

	ndev->db_valid_mask = 0;
	ndev->db_link_mask = 0;
	ndev->db_mask = 0;

	spin_lock_init(&ndev->db_mask_lock);
}

static int intel_ntb_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct intel_ntb_dev *ndev;
	int rc;

	if (pdev_is_bwd(pdev)) {
		ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = bwd_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_snb(pdev)) {
		ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = snb_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else {
		rc = -EINVAL;
		goto err_ndev;
	}

	ndev_reset_unsafe_flags(ndev);

	ndev->reg->poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	if (pdev_is_bwd(pdev))
		bwd_deinit_dev(ndev);
	else if (pdev_is_snb(pdev))
		snb_deinit_dev(ndev);
err_init_dev:
	intel_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	if (pdev_is_bwd(pdev))
		bwd_deinit_dev(ndev);
	else if (pdev_is_snb(pdev))
		snb_deinit_dev(ndev);
	intel_ntb_deinit_pci(ndev);
	kfree(ndev);
}

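/* Hardware register layout tables.  The common code above never
 * hard-codes an offset: it reaches the hardware through ndev->reg,
 * ndev->self_reg, ndev->peer_reg and ndev->xlat_reg, which hardware
 * init points at the bwd_* or snb_* tables below.
 */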
static const struct intel_ntb_reg bwd_reg = {
	.poll_link = bwd_poll_link,
	.link_is_up = bwd_link_is_up,
	.db_ioread = bwd_db_ioread,
	.db_iowrite = bwd_db_iowrite,
	.db_size = sizeof(u64),
	.ntb_ctl = BWD_NTBCNTL_OFFSET,
	.mw_bar = {2, 4},
};

static const struct intel_ntb_alt_reg bwd_pri_reg = {
	.db_bell = BWD_PDOORBELL_OFFSET,
	.db_mask = BWD_PDBMSK_OFFSET,
	.spad = BWD_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg bwd_b2b_reg = {
	.db_bell = BWD_B2B_DOORBELL_OFFSET,
	.spad = BWD_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg bwd_sec_xlat = {
	/* FIXME : .bar0_base = BWD_SBAR0BASE_OFFSET, */
	/* FIXME : .bar2_limit = BWD_SBAR2LMT_OFFSET, */
	.bar2_xlat = BWD_SBAR2XLAT_OFFSET,
};

static const struct intel_ntb_reg snb_reg = {
	.poll_link = snb_poll_link,
	.link_is_up = snb_link_is_up,
	.db_ioread = snb_db_ioread,
	.db_iowrite = snb_db_iowrite,
	.db_size = sizeof(u32),
	.ntb_ctl = SNB_NTBCNTL_OFFSET,
	.mw_bar = {2, 4, 5},
};

static const struct intel_ntb_alt_reg snb_pri_reg = {
	.db_bell = SNB_PDOORBELL_OFFSET,
	.db_mask = SNB_PDBMSK_OFFSET,
	.spad = SNB_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg snb_sec_reg = {
	.db_bell = SNB_SDOORBELL_OFFSET,
	.db_mask = SNB_SDBMSK_OFFSET,
	/* second half of the scratchpads: each scratchpad register is
	 * 32 bits wide, so skipping the first SNB_SPAD_COUNT / 2 registers
	 * is an offset of count / 2 * 4 == count << 1 bytes
	 */
	.spad = SNB_SPAD_OFFSET + (SNB_SPAD_COUNT << 1),
};

static const struct intel_ntb_alt_reg snb_b2b_reg = {
	.db_bell = SNB_B2B_DOORBELL_OFFSET,
	.spad = SNB_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg snb_pri_xlat = {
	/* Note: no primary .bar0_base is visible to the secondary side.
	 *
	 * The secondary side cannot read the base address stored in the
	 * primary BARs, and the base address is needed to set the limit
	 * register to any value other than zero (unlimited).
	 *
	 * Without the base address, the secondary side can neither disable
	 * the window by setting the limit equal to the base, nor bound the
	 * window size by setting the limit to base + size.
	 */
	.bar2_limit = SNB_PBAR23LMT_OFFSET,
	.bar2_xlat = SNB_PBAR23XLAT_OFFSET,
};

static const struct intel_ntb_xlat_reg snb_sec_xlat = {
	.bar0_base = SNB_SBAR0BASE_OFFSET,
	.bar2_limit = SNB_SBAR23LMT_OFFSET,
	.bar2_xlat = SNB_SBAR23XLAT_OFFSET,
};

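/* Fixed b2b MMIO addresses for the upstream (USD) and downstream (DSD)
 * sides of the link.  Each side's BARs are programmed to one of these
 * sets so the two devices can reach each other's registers across the
 * back-to-back bridge.
 */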
static const struct intel_b2b_addr snb_b2b_usd_addr = {
	.bar2_addr64 = SNB_B2B_BAR2_USD_ADDR64,
	.bar4_addr64 = SNB_B2B_BAR4_USD_ADDR64,
	.bar4_addr32 = SNB_B2B_BAR4_USD_ADDR32,
	.bar5_addr32 = SNB_B2B_BAR5_USD_ADDR32,
};

static const struct intel_b2b_addr snb_b2b_dsd_addr = {
	.bar2_addr64 = SNB_B2B_BAR2_DSD_ADDR64,
	.bar4_addr64 = SNB_B2B_BAR4_DSD_ADDR64,
	.bar4_addr32 = SNB_B2B_BAR4_DSD_ADDR32,
	.bar5_addr32 = SNB_B2B_BAR5_DSD_ADDR32,
};

/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
	.mw_count = intel_ntb_mw_count,
	.mw_get_range = intel_ntb_mw_get_range,
	.mw_set_trans = intel_ntb_mw_set_trans,
	.link_is_up = intel_ntb_link_is_up,
	.link_enable = intel_ntb_link_enable,
	.link_disable = intel_ntb_link_disable,
	.db_is_unsafe = intel_ntb_db_is_unsafe,
	.db_valid_mask = intel_ntb_db_valid_mask,
	.db_vector_count = intel_ntb_db_vector_count,
	.db_vector_mask = intel_ntb_db_vector_mask,
	.db_read = intel_ntb_db_read,
	.db_clear = intel_ntb_db_clear,
	.db_set_mask = intel_ntb_db_set_mask,
	.db_clear_mask = intel_ntb_db_clear_mask,
	.peer_db_addr = intel_ntb_peer_db_addr,
	.peer_db_set = intel_ntb_peer_db_set,
	.spad_is_unsafe = intel_ntb_spad_is_unsafe,
	.spad_count = intel_ntb_spad_count,
	.spad_read = intel_ntb_spad_read,
	.spad_write = intel_ntb_spad_write,
	.peer_spad_addr = intel_ntb_peer_spad_addr,
	.peer_spad_read = intel_ntb_peer_spad_read,
	.peer_spad_write = intel_ntb_peer_spad_write,
};

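/* A minimal sketch of how a client driver would reach these ops through
 * the ntb.h wrappers (hypothetical client code, not part of this driver;
 * "magic" is an arbitrary value chosen for illustration):
 *
 *	u32 magic = 0x4e544221;
 *
 *	ntb_spad_write(ntb, 0, magic);
 *	if (!ntb_db_is_unsafe(ntb))
 *		ntb_db_clear_mask(ntb, ntb_db_valid_mask(ntb));
 *	ntb_peer_db_set(ntb, BIT_ULL(0));
 *
 * Each ntb_*() wrapper dispatches to the intel_ntb_*() implementation
 * registered above.
 */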
static const struct file_operations intel_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

static const struct pci_device_id intel_ntb_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);

static struct pci_driver intel_ntb_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = intel_ntb_pci_tbl,
	.probe = intel_ntb_pci_probe,
	.remove = intel_ntb_pci_remove,
};

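/* Create the shared debugfs directory before registering the driver so
 * that probe can populate per-device entries; if debugfs is unavailable,
 * debugfs_dir stays NULL and the per-device info files are skipped.
 */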
static int __init intel_ntb_pci_driver_init(void)
{
	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);

static void __exit intel_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&intel_ntb_pci_driver);

	debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);