blob: 3be323132896fc29d5d5521d578997cc09082625 [file] [log] [blame]
Jon Masonfce8a7b2012-11-16 19:27:12 -07001/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
Allen Hubbee26a5842015-04-09 10:33:20 -04008 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
Serge Semin443b9a12017-01-11 03:11:33 +03009 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
Jon Masonfce8a7b2012-11-16 19:27:12 -070010 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * BSD LICENSE
16 *
17 * Copyright(c) 2012 Intel Corporation. All rights reserved.
Allen Hubbee26a5842015-04-09 10:33:20 -040018 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
Serge Semin443b9a12017-01-11 03:11:33 +030019 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
Jon Masonfce8a7b2012-11-16 19:27:12 -070020 *
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
23 * are met:
24 *
25 * * Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * * Redistributions in binary form must reproduce the above copy
28 * notice, this list of conditions and the following disclaimer in
29 * the documentation and/or other materials provided with the
30 * distribution.
31 * * Neither the name of Intel Corporation nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
34 *
35 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 *
47 * Intel PCIe NTB Linux driver
48 *
49 * Contact Information:
50 * Jon Mason <jon.mason@intel.com>
51 */
Allen Hubbee26a5842015-04-09 10:33:20 -040052
Jon Masonfce8a7b2012-11-16 19:27:12 -070053#include <linux/debugfs.h>
Jon Mason113bf1c2012-11-16 18:52:57 -070054#include <linux/delay.h>
Jon Masonfce8a7b2012-11-16 19:27:12 -070055#include <linux/init.h>
56#include <linux/interrupt.h>
57#include <linux/module.h>
58#include <linux/pci.h>
Jon Mason113bf1c2012-11-16 18:52:57 -070059#include <linux/random.h>
Jon Masonfce8a7b2012-11-16 19:27:12 -070060#include <linux/slab.h>
Allen Hubbee26a5842015-04-09 10:33:20 -040061#include <linux/ntb.h>
62
Allen Hubbeec110bc2015-05-07 06:45:21 -040063#include "ntb_hw_intel.h"
Jon Masonfce8a7b2012-11-16 19:27:12 -070064
Allen Hubbee26a5842015-04-09 10:33:20 -040065#define NTB_NAME "ntb_hw_intel"
66#define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver"
67#define NTB_VER "2.0"
Jon Masonfce8a7b2012-11-16 19:27:12 -070068
Allen Hubbee26a5842015-04-09 10:33:20 -040069MODULE_DESCRIPTION(NTB_DESC);
Jon Masonfce8a7b2012-11-16 19:27:12 -070070MODULE_VERSION(NTB_VER);
71MODULE_LICENSE("Dual BSD/GPL");
72MODULE_AUTHOR("Intel Corporation");
73
Allen Hubbee26a5842015-04-09 10:33:20 -040074#define bar0_off(base, bar) ((base) + ((bar) << 2))
75#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
Jon Masonfce8a7b2012-11-16 19:27:12 -070076
Dave Jiang2f887b92015-05-20 12:55:47 -040077static const struct intel_ntb_reg xeon_reg;
78static const struct intel_ntb_alt_reg xeon_pri_reg;
79static const struct intel_ntb_alt_reg xeon_sec_reg;
80static const struct intel_ntb_alt_reg xeon_b2b_reg;
81static const struct intel_ntb_xlat_reg xeon_pri_xlat;
82static const struct intel_ntb_xlat_reg xeon_sec_xlat;
83static struct intel_b2b_addr xeon_b2b_usd_addr;
84static struct intel_b2b_addr xeon_b2b_dsd_addr;
Dave Jiang783dfa62016-11-16 14:03:38 -070085static const struct intel_ntb_reg skx_reg;
86static const struct intel_ntb_alt_reg skx_pri_reg;
87static const struct intel_ntb_alt_reg skx_b2b_reg;
88static const struct intel_ntb_xlat_reg skx_sec_xlat;
Allen Hubbe42fefc82015-05-11 05:45:30 -040089static const struct ntb_dev_ops intel_ntb_ops;
Dave Jiang783dfa62016-11-16 14:03:38 -070090static const struct ntb_dev_ops intel_ntb3_ops;
Allen Hubbe42fefc82015-05-11 05:45:30 -040091
92static const struct file_operations intel_ntb_debugfs_info;
93static struct dentry *debugfs_dir;
94
Allen Hubbee26a5842015-04-09 10:33:20 -040095static int b2b_mw_idx = -1;
96module_param(b2b_mw_idx, int, 0644);
97MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
98 "value of zero or positive starts from first mw idx, and a "
99 "negative value starts from last mw idx. Both sides MUST "
100 "set the same value here!");
Jon Masonfce8a7b2012-11-16 19:27:12 -0700101
Allen Hubbee26a5842015-04-09 10:33:20 -0400102static unsigned int b2b_mw_share;
103module_param(b2b_mw_share, uint, 0644);
104MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
105 "ntb so that the peer ntb only occupies the first half of "
106 "the mw, so the second half can still be used as a mw. Both "
107 "sides MUST set the same value here!");
Jon Masonfce8a7b2012-11-16 19:27:12 -0700108
Dave Jiang2f887b92015-05-20 12:55:47 -0400109module_param_named(xeon_b2b_usd_bar2_addr64,
110 xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
111MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
112 "XEON B2B USD BAR 2 64-bit address");
Allen Hubbee26a5842015-04-09 10:33:20 -0400113
Dave Jiang2f887b92015-05-20 12:55:47 -0400114module_param_named(xeon_b2b_usd_bar4_addr64,
115 xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
Wei Yongjun49b89de2016-08-08 09:48:42 +0000116MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
Dave Jiang2f887b92015-05-20 12:55:47 -0400117 "XEON B2B USD BAR 4 64-bit address");
Allen Hubbee26a5842015-04-09 10:33:20 -0400118
Dave Jiang2f887b92015-05-20 12:55:47 -0400119module_param_named(xeon_b2b_usd_bar4_addr32,
120 xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
Wei Yongjun49b89de2016-08-08 09:48:42 +0000121MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
Dave Jiang2f887b92015-05-20 12:55:47 -0400122 "XEON B2B USD split-BAR 4 32-bit address");
Allen Hubbe42fefc82015-05-11 05:45:30 -0400123
Dave Jiang2f887b92015-05-20 12:55:47 -0400124module_param_named(xeon_b2b_usd_bar5_addr32,
125 xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
Wei Yongjun49b89de2016-08-08 09:48:42 +0000126MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
Dave Jiang2f887b92015-05-20 12:55:47 -0400127 "XEON B2B USD split-BAR 5 32-bit address");
Allen Hubbe42fefc82015-05-11 05:45:30 -0400128
Dave Jiang2f887b92015-05-20 12:55:47 -0400129module_param_named(xeon_b2b_dsd_bar2_addr64,
130 xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
131MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
132 "XEON B2B DSD BAR 2 64-bit address");
Allen Hubbe42fefc82015-05-11 05:45:30 -0400133
Dave Jiang2f887b92015-05-20 12:55:47 -0400134module_param_named(xeon_b2b_dsd_bar4_addr64,
135 xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
Wei Yongjun49b89de2016-08-08 09:48:42 +0000136MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
Dave Jiang2f887b92015-05-20 12:55:47 -0400137 "XEON B2B DSD BAR 4 64-bit address");
Allen Hubbe42fefc82015-05-11 05:45:30 -0400138
Dave Jiang2f887b92015-05-20 12:55:47 -0400139module_param_named(xeon_b2b_dsd_bar4_addr32,
140 xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
Wei Yongjun49b89de2016-08-08 09:48:42 +0000141MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
Dave Jiang2f887b92015-05-20 12:55:47 -0400142 "XEON B2B DSD split-BAR 4 32-bit address");
Allen Hubbe42fefc82015-05-11 05:45:30 -0400143
Dave Jiang2f887b92015-05-20 12:55:47 -0400144module_param_named(xeon_b2b_dsd_bar5_addr32,
145 xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
Wei Yongjun49b89de2016-08-08 09:48:42 +0000146MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
Dave Jiang2f887b92015-05-20 12:55:47 -0400147 "XEON B2B DSD split-BAR 5 32-bit address");
Jon Mason1517a3f2013-07-30 15:58:49 -0700148
Dave Jiang783dfa62016-11-16 14:03:38 -0700149static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
150static int xeon_init_isr(struct intel_ntb_dev *ndev);
151
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
/*
 * Fallback 64-bit MMIO read for platforms without readq: performed as
 * two 32-bit reads, low dword first, then high dword at mmio + 4.
 * NOTE(review): not atomic - the device observes two accesses, and the
 * value can tear if the register changes between the two reads.
 */
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif
Jon Masonfce8a7b2012-11-16 19:27:12 -0700167
#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
/*
 * Fallback 64-bit MMIO write for platforms without writeq: performed as
 * two 32-bit writes, low dword first, then high dword at mmio + 4.
 * NOTE(review): not atomic with respect to the device; the register is
 * briefly observable with only the low half updated.
 */
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
180
Dave Jiang2f887b92015-05-20 12:55:47 -0400181static inline int pdev_is_xeon(struct pci_dev *pdev)
Allen Hubbee26a5842015-04-09 10:33:20 -0400182{
183 switch (pdev->device) {
184 case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
185 case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
186 case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
187 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
Dave Jiang0a5d19d2015-07-13 08:07:18 -0400188 case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
Allen Hubbee26a5842015-04-09 10:33:20 -0400189 case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
190 case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
191 case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
192 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
Dave Jiang0a5d19d2015-07-13 08:07:18 -0400193 case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
Allen Hubbee26a5842015-04-09 10:33:20 -0400194 case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
195 case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
196 case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
197 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
Dave Jiang0a5d19d2015-07-13 08:07:18 -0400198 case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
Allen Hubbee26a5842015-04-09 10:33:20 -0400199 return 1;
200 }
201 return 0;
202}
203
Dave Jiang783dfa62016-11-16 14:03:38 -0700204static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
205{
206 if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
207 return 1;
208
209 return 0;
210}
211
Allen Hubbee26a5842015-04-09 10:33:20 -0400212static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
213{
214 ndev->unsafe_flags = 0;
215 ndev->unsafe_flags_ignore = 0;
216
217 /* Only B2B has a workaround to avoid SDOORBELL */
218 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
219 if (!ntb_topo_is_b2b(ndev->ntb.topo))
220 ndev->unsafe_flags |= NTB_UNSAFE_DB;
221
222 /* No low level workaround to avoid SB01BASE */
223 if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
224 ndev->unsafe_flags |= NTB_UNSAFE_DB;
225 ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
226 }
227}
228
229static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
230 unsigned long flag)
231{
232 return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
233}
234
235static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
236 unsigned long flag)
237{
238 flag &= ndev->unsafe_flags;
239 ndev->unsafe_flags_ignore |= flag;
240
241 return !!flag;
242}
243
244static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
245{
Allen Hubbe9a078262015-08-31 09:31:00 -0400246 if (idx < 0 || idx >= ndev->mw_count)
Allen Hubbee26a5842015-04-09 10:33:20 -0400247 return -EINVAL;
248 return ndev->reg->mw_bar[idx];
249}
250
/*
 * Report the peer doorbell register's physical address and size.
 * @db_addr and @db_size may each be NULL when the caller does not need
 * that value.  Always returns 0.
 */
static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
			       phys_addr_t *db_addr, resource_size_t *db_size,
			       phys_addr_t reg_addr, unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_addr) {
		*db_addr = reg_addr + reg;
		dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
	}

	if (db_size) {
		*db_size = ndev->reg->db_size;
		dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
	}

	return 0;
}
270
/*
 * Read the doorbell register at @mmio through the hardware-specific
 * accessor (register width differs per platform).
 */
static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
			       void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	return ndev->reg->db_ioread(mmio);
}
279
/*
 * Write @db_bits to the doorbell register at @mmio.
 * Returns -EINVAL if any requested bit falls outside db_valid_mask.
 */
static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
				void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	ndev->reg->db_iowrite(db_bits, mmio);

	return 0;
}
293
/*
 * Set (i.e. disable) doorbell interrupt bits @db_bits in the cached
 * mask, then write the whole mask to hardware.  db_mask_lock serializes
 * the read-modify-write of ndev->db_mask against ndev_db_clear_mask().
 * Returns -EINVAL for bits outside db_valid_mask.
 */
static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				   void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask |= db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}
314
/*
 * Clear (i.e. re-enable) doorbell interrupt bits @db_bits in the cached
 * mask, then write the whole mask to hardware, under db_mask_lock.
 * Returns -EINVAL for bits outside db_valid_mask.
 */
static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				     void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask &= ~db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}
335
336static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
337{
338 u64 shift, mask;
339
340 shift = ndev->db_vec_shift;
341 mask = BIT_ULL(shift) - 1;
342
343 return mask << (shift * db_vector);
344}
345
/*
 * Report the physical address of scratchpad register @idx within the
 * scratchpad block at @reg_addr + @reg.  Each scratchpad is 4 bytes
 * wide, hence the (idx << 2) offset.  @spad_addr may be NULL.
 * Returns -EINVAL for an out-of-range index.
 */
static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
				 phys_addr_t *spad_addr, phys_addr_t reg_addr,
				 unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	if (spad_addr) {
		*spad_addr = reg_addr + reg + (idx << 2);
		dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
			*spad_addr);
	}

	return 0;
}
364
/*
 * Read scratchpad register @idx from the scratchpad block at @mmio.
 * NOTE(review): an out-of-range index silently reads as 0 - the u32
 * return has no error channel, so callers cannot distinguish a bad
 * index from a scratchpad that really contains zero.
 */
static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
				 void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	return ioread32(mmio + (idx << 2));
}
376
377static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
378 void __iomem *mmio)
379{
Dave Jiangfd839bf2015-06-15 08:22:30 -0400380 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
381 pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
Allen Hubbee26a5842015-04-09 10:33:20 -0400382
383 if (idx < 0 || idx >= ndev->spad_count)
384 return -EINVAL;
385
386 iowrite32(val, mmio + (idx << 2));
387
388 return 0;
389}
390
/*
 * Common interrupt service routine: dispatch link and doorbell events
 * for the doorbell bits belonging to vector @vec.
 */
static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
	u64 vec_mask;

	vec_mask = ndev_vec_mask(ndev, vec);

	/* HW quirk: on parts flagged MSIX_VECTOR32_BAD, the link event is
	 * also delivered on vector 31, so treat that vector as owning the
	 * link bit too.
	 */
	if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
		vec_mask |= ndev->db_link_mask;

	dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);

	/* Timestamp used elsewhere for link polling heuristics */
	ndev->last_ts = jiffies;

	if (vec_mask & ndev->db_link_mask) {
		if (ndev->reg->poll_link(ndev))
			ntb_link_event(&ndev->ntb);
	}

	if (vec_mask & ndev->db_valid_mask)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}
414
/* MSI-X per-vector interrupt handler; @dev is the intel_ntb_vec. */
static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct intel_ntb_vec *nvec = dev;

	dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n",
		irq, nvec->num);

	return ndev_interrupt(nvec->ndev, nvec->num);
}
424
/*
 * Shared MSI/INTx interrupt handler: a single irq covers all vectors,
 * so derive the vector number from the irq offset (0 for MSI/INTx).
 */
static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct intel_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}
431
/*
 * Set up interrupt delivery, trying MSI-X first, then MSI, then legacy
 * INTx.  On success, db_vec_count/db_vec_shift describe how doorbell
 * bits map onto the vectors obtained.  Each MSI-X failure path falls
 * through (via the goto ladder) into the next, less capable, scheme
 * rather than returning an error - only a total failure returns rc.
 */
static int ndev_init_isr(struct intel_ntb_dev *ndev,
			 int msix_min, int msix_max,
			 int msix_shift, int total_shift)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev->ntb.pdev;

	/* Allocate on the device's NUMA node for locality */
	node = dev_to_node(&pdev->dev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	/* Try to set up msix irq */

	ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
	ndev->db_vec_count = msix_count;
	ndev->db_vec_shift = msix_shift;
	return 0;

err_msix_request:
	/* Unwind only the irqs requested so far (i is the failed index) */
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */

	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(&pdev->dev, "Using msi interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */

	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(&pdev->dev, "Using intx interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_intx_request:
	return rc;
}
532
/*
 * Tear down whatever ndev_init_isr() set up: mask all doorbell
 * interrupts, then release either the MSI-X vectors (ndev->msix
 * non-NULL) or the single MSI/INTx irq.
 */
static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int i;

	pdev = ndev->ntb.pdev;

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	if (ndev->msix) {
		i = ndev->db_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
	}
}
559
/*
 * debugfs "info" read handler for gen3 (Skylake) NTB devices: format a
 * snapshot of device state, link status, doorbell state, translation
 * registers, and error counters into a bounded buffer and copy it to
 * userspace via simple_read_from_buffer().
 */
static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
				      size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	/* Cap the scratch buffer at 2 KiB regardless of request size */
	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev))
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	/* Live register values, as opposed to the cached copies above */
	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);

	/* Outgoing translation and secondary BARs only exist in B2B */
	if (ntb_topo_is_b2b(ndev->ntb.topo)) {
		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Outgoing B2B XLAT:\n");

		u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);

		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Secondary BAR:\n");

		u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR0 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2 -\t\t%#018llx\n", u.v64);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Statistics:\n");

	u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Upstream Memory Miss -\t%u\n", u.v16);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Hardware Errors:\n");

	/* Config-space reads may fail; only print on success (rc == 0) */
	if (!pci_read_config_word(ndev->ntb.pdev,
				  SKX_DEVSTS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "DEVSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_word(ndev->ntb.pdev,
				  SKX_LINK_STATUS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "LNKSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   SKX_UNCERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "UNCERRSTS -\t\t%#06x\n", u.v32);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   SKX_CORERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "CORERRSTS -\t\t%#06x\n", u.v32);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}
722
723static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
724 size_t count, loff_t *offp)
Allen Hubbee26a5842015-04-09 10:33:20 -0400725{
726 struct intel_ntb_dev *ndev;
Allen Hubbe40895272016-07-22 09:38:22 -0400727 struct pci_dev *pdev;
Allen Hubbee26a5842015-04-09 10:33:20 -0400728 void __iomem *mmio;
729 char *buf;
730 size_t buf_size;
731 ssize_t ret, off;
Allen Hubbe40895272016-07-22 09:38:22 -0400732 union { u64 v64; u32 v32; u16 v16; u8 v8; } u;
Allen Hubbee26a5842015-04-09 10:33:20 -0400733
734 ndev = filp->private_data;
Logan Gunthorpe48ea0212017-01-10 17:33:37 -0700735 pdev = ndev->ntb.pdev;
Allen Hubbee26a5842015-04-09 10:33:20 -0400736 mmio = ndev->self_mmio;
737
738 buf_size = min(count, 0x800ul);
739
740 buf = kmalloc(buf_size, GFP_KERNEL);
741 if (!buf)
742 return -ENOMEM;
743
744 off = 0;
745
746 off += scnprintf(buf + off, buf_size - off,
747 "NTB Device Information:\n");
748
749 off += scnprintf(buf + off, buf_size - off,
750 "Connection Topology -\t%s\n",
751 ntb_topo_string(ndev->ntb.topo));
752
Allen Hubbe2aa2a772015-08-31 09:30:59 -0400753 if (ndev->b2b_idx != UINT_MAX) {
754 off += scnprintf(buf + off, buf_size - off,
755 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
756 off += scnprintf(buf + off, buf_size - off,
757 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
758 }
759
Allen Hubbee26a5842015-04-09 10:33:20 -0400760 off += scnprintf(buf + off, buf_size - off,
761 "BAR4 Split -\t\t%s\n",
762 ndev->bar4_split ? "yes" : "no");
763
764 off += scnprintf(buf + off, buf_size - off,
765 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
766 off += scnprintf(buf + off, buf_size - off,
767 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
768
769 if (!ndev->reg->link_is_up(ndev)) {
770 off += scnprintf(buf + off, buf_size - off,
771 "Link Status -\t\tDown\n");
772 } else {
773 off += scnprintf(buf + off, buf_size - off,
774 "Link Status -\t\tUp\n");
775 off += scnprintf(buf + off, buf_size - off,
776 "Link Speed -\t\tPCI-E Gen %u\n",
777 NTB_LNK_STA_SPEED(ndev->lnk_sta));
778 off += scnprintf(buf + off, buf_size - off,
779 "Link Width -\t\tx%u\n",
780 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
781 }
782
783 off += scnprintf(buf + off, buf_size - off,
784 "Memory Window Count -\t%u\n", ndev->mw_count);
785 off += scnprintf(buf + off, buf_size - off,
786 "Scratchpad Count -\t%u\n", ndev->spad_count);
787 off += scnprintf(buf + off, buf_size - off,
788 "Doorbell Count -\t%u\n", ndev->db_count);
789 off += scnprintf(buf + off, buf_size - off,
790 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
791 off += scnprintf(buf + off, buf_size - off,
792 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
793
794 off += scnprintf(buf + off, buf_size - off,
795 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
796 off += scnprintf(buf + off, buf_size - off,
797 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
798 off += scnprintf(buf + off, buf_size - off,
799 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
800
801 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
802 off += scnprintf(buf + off, buf_size - off,
803 "Doorbell Mask -\t\t%#llx\n", u.v64);
804
805 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
806 off += scnprintf(buf + off, buf_size - off,
807 "Doorbell Bell -\t\t%#llx\n", u.v64);
808
809 off += scnprintf(buf + off, buf_size - off,
Allen Hubbe40895272016-07-22 09:38:22 -0400810 "\nNTB Window Size:\n");
811
812 pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
813 off += scnprintf(buf + off, buf_size - off,
814 "PBAR23SZ %hhu\n", u.v8);
815 if (!ndev->bar4_split) {
816 pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
817 off += scnprintf(buf + off, buf_size - off,
818 "PBAR45SZ %hhu\n", u.v8);
819 } else {
820 pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
821 off += scnprintf(buf + off, buf_size - off,
822 "PBAR4SZ %hhu\n", u.v8);
823 pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
824 off += scnprintf(buf + off, buf_size - off,
825 "PBAR5SZ %hhu\n", u.v8);
826 }
827
828 pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
829 off += scnprintf(buf + off, buf_size - off,
830 "SBAR23SZ %hhu\n", u.v8);
831 if (!ndev->bar4_split) {
832 pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
833 off += scnprintf(buf + off, buf_size - off,
834 "SBAR45SZ %hhu\n", u.v8);
835 } else {
836 pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
837 off += scnprintf(buf + off, buf_size - off,
838 "SBAR4SZ %hhu\n", u.v8);
839 pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
840 off += scnprintf(buf + off, buf_size - off,
841 "SBAR5SZ %hhu\n", u.v8);
842 }
843
844 off += scnprintf(buf + off, buf_size - off,
Allen Hubbee26a5842015-04-09 10:33:20 -0400845 "\nNTB Incoming XLAT:\n");
846
847 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
848 off += scnprintf(buf + off, buf_size - off,
849 "XLAT23 -\t\t%#018llx\n", u.v64);
850
Dave Jiangbf44fe42015-06-18 05:17:30 -0400851 if (ndev->bar4_split) {
852 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
853 off += scnprintf(buf + off, buf_size - off,
854 "XLAT4 -\t\t\t%#06x\n", u.v32);
855
856 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
857 off += scnprintf(buf + off, buf_size - off,
858 "XLAT5 -\t\t\t%#06x\n", u.v32);
859 } else {
860 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
861 off += scnprintf(buf + off, buf_size - off,
862 "XLAT45 -\t\t%#018llx\n", u.v64);
863 }
Allen Hubbee26a5842015-04-09 10:33:20 -0400864
865 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
866 off += scnprintf(buf + off, buf_size - off,
867 "LMT23 -\t\t\t%#018llx\n", u.v64);
868
Dave Jiangbf44fe42015-06-18 05:17:30 -0400869 if (ndev->bar4_split) {
870 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
871 off += scnprintf(buf + off, buf_size - off,
872 "LMT4 -\t\t\t%#06x\n", u.v32);
873 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
874 off += scnprintf(buf + off, buf_size - off,
875 "LMT5 -\t\t\t%#06x\n", u.v32);
876 } else {
877 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
878 off += scnprintf(buf + off, buf_size - off,
879 "LMT45 -\t\t\t%#018llx\n", u.v64);
880 }
Allen Hubbee26a5842015-04-09 10:33:20 -0400881
Allen Hubbe95f14642016-07-22 09:38:23 -0400882 if (pdev_is_xeon(pdev)) {
Allen Hubbee26a5842015-04-09 10:33:20 -0400883 if (ntb_topo_is_b2b(ndev->ntb.topo)) {
884 off += scnprintf(buf + off, buf_size - off,
885 "\nNTB Outgoing B2B XLAT:\n");
886
Dave Jiang2f887b92015-05-20 12:55:47 -0400887 u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -0400888 off += scnprintf(buf + off, buf_size - off,
889 "B2B XLAT23 -\t\t%#018llx\n", u.v64);
890
Dave Jiangbf44fe42015-06-18 05:17:30 -0400891 if (ndev->bar4_split) {
892 u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
893 off += scnprintf(buf + off, buf_size - off,
894 "B2B XLAT4 -\t\t%#06x\n",
895 u.v32);
896 u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
897 off += scnprintf(buf + off, buf_size - off,
898 "B2B XLAT5 -\t\t%#06x\n",
899 u.v32);
900 } else {
901 u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
902 off += scnprintf(buf + off, buf_size - off,
903 "B2B XLAT45 -\t\t%#018llx\n",
904 u.v64);
905 }
Allen Hubbee26a5842015-04-09 10:33:20 -0400906
Dave Jiang2f887b92015-05-20 12:55:47 -0400907 u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -0400908 off += scnprintf(buf + off, buf_size - off,
909 "B2B LMT23 -\t\t%#018llx\n", u.v64);
910
Dave Jiangbf44fe42015-06-18 05:17:30 -0400911 if (ndev->bar4_split) {
912 u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
913 off += scnprintf(buf + off, buf_size - off,
914 "B2B LMT4 -\t\t%#06x\n",
915 u.v32);
916 u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
917 off += scnprintf(buf + off, buf_size - off,
918 "B2B LMT5 -\t\t%#06x\n",
919 u.v32);
920 } else {
921 u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
922 off += scnprintf(buf + off, buf_size - off,
923 "B2B LMT45 -\t\t%#018llx\n",
924 u.v64);
925 }
Allen Hubbee26a5842015-04-09 10:33:20 -0400926
927 off += scnprintf(buf + off, buf_size - off,
928 "\nNTB Secondary BAR:\n");
929
Dave Jiang2f887b92015-05-20 12:55:47 -0400930 u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -0400931 off += scnprintf(buf + off, buf_size - off,
932 "SBAR01 -\t\t%#018llx\n", u.v64);
933
Dave Jiang2f887b92015-05-20 12:55:47 -0400934 u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -0400935 off += scnprintf(buf + off, buf_size - off,
936 "SBAR23 -\t\t%#018llx\n", u.v64);
937
Dave Jiangbf44fe42015-06-18 05:17:30 -0400938 if (ndev->bar4_split) {
939 u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
940 off += scnprintf(buf + off, buf_size - off,
941 "SBAR4 -\t\t\t%#06x\n", u.v32);
942 u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
943 off += scnprintf(buf + off, buf_size - off,
944 "SBAR5 -\t\t\t%#06x\n", u.v32);
945 } else {
946 u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
947 off += scnprintf(buf + off, buf_size - off,
948 "SBAR45 -\t\t%#018llx\n",
949 u.v64);
950 }
Allen Hubbee26a5842015-04-09 10:33:20 -0400951 }
952
953 off += scnprintf(buf + off, buf_size - off,
Dave Jiang2f887b92015-05-20 12:55:47 -0400954 "\nXEON NTB Statistics:\n");
Allen Hubbee26a5842015-04-09 10:33:20 -0400955
Dave Jiang2f887b92015-05-20 12:55:47 -0400956 u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -0400957 off += scnprintf(buf + off, buf_size - off,
958 "Upstream Memory Miss -\t%u\n", u.v16);
959
960 off += scnprintf(buf + off, buf_size - off,
Dave Jiang2f887b92015-05-20 12:55:47 -0400961 "\nXEON NTB Hardware Errors:\n");
Allen Hubbee26a5842015-04-09 10:33:20 -0400962
Allen Hubbe95f14642016-07-22 09:38:23 -0400963 if (!pci_read_config_word(pdev,
Dave Jiang2f887b92015-05-20 12:55:47 -0400964 XEON_DEVSTS_OFFSET, &u.v16))
Allen Hubbee26a5842015-04-09 10:33:20 -0400965 off += scnprintf(buf + off, buf_size - off,
966 "DEVSTS -\t\t%#06x\n", u.v16);
967
Allen Hubbe95f14642016-07-22 09:38:23 -0400968 if (!pci_read_config_word(pdev,
Dave Jiang2f887b92015-05-20 12:55:47 -0400969 XEON_LINK_STATUS_OFFSET, &u.v16))
Allen Hubbee26a5842015-04-09 10:33:20 -0400970 off += scnprintf(buf + off, buf_size - off,
971 "LNKSTS -\t\t%#06x\n", u.v16);
972
Allen Hubbe95f14642016-07-22 09:38:23 -0400973 if (!pci_read_config_dword(pdev,
Dave Jiang2f887b92015-05-20 12:55:47 -0400974 XEON_UNCERRSTS_OFFSET, &u.v32))
Allen Hubbee26a5842015-04-09 10:33:20 -0400975 off += scnprintf(buf + off, buf_size - off,
976 "UNCERRSTS -\t\t%#06x\n", u.v32);
977
Allen Hubbe95f14642016-07-22 09:38:23 -0400978 if (!pci_read_config_dword(pdev,
Dave Jiang2f887b92015-05-20 12:55:47 -0400979 XEON_CORERRSTS_OFFSET, &u.v32))
Allen Hubbee26a5842015-04-09 10:33:20 -0400980 off += scnprintf(buf + off, buf_size - off,
981 "CORERRSTS -\t\t%#06x\n", u.v32);
982 }
983
984 ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
985 kfree(buf);
986 return ret;
987}
988
Dave Jiang783dfa62016-11-16 14:03:38 -0700989static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
990 size_t count, loff_t *offp)
991{
992 struct intel_ntb_dev *ndev = filp->private_data;
993
Dave Jiang3f775672017-11-20 10:24:08 -0700994 if (pdev_is_xeon(ndev->ntb.pdev))
Dave Jiang783dfa62016-11-16 14:03:38 -0700995 return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
996 else if (pdev_is_skx_xeon(ndev->ntb.pdev))
997 return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
998
999 return -ENXIO;
1000}
1001
Allen Hubbee26a5842015-04-09 10:33:20 -04001002static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
1003{
1004 if (!debugfs_dir) {
1005 ndev->debugfs_dir = NULL;
1006 ndev->debugfs_info = NULL;
1007 } else {
1008 ndev->debugfs_dir =
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001009 debugfs_create_dir(pci_name(ndev->ntb.pdev),
1010 debugfs_dir);
Allen Hubbee26a5842015-04-09 10:33:20 -04001011 if (!ndev->debugfs_dir)
1012 ndev->debugfs_info = NULL;
1013 else
1014 ndev->debugfs_info =
1015 debugfs_create_file("info", S_IRUSR,
1016 ndev->debugfs_dir, ndev,
1017 &intel_ntb_debugfs_info);
1018 }
1019}
1020
/* Remove this device's debugfs directory and everything inside it. */
static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}
1025
Serge Semin443b9a12017-01-11 03:11:33 +03001026static int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
Allen Hubbee26a5842015-04-09 10:33:20 -04001027{
Serge Semin443b9a12017-01-11 03:11:33 +03001028 if (pidx != NTB_DEF_PEER_IDX)
1029 return -EINVAL;
1030
Allen Hubbee26a5842015-04-09 10:33:20 -04001031 return ntb_ndev(ntb)->mw_count;
1032}
1033
Serge Semin443b9a12017-01-11 03:11:33 +03001034static int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
1035 resource_size_t *addr_align,
1036 resource_size_t *size_align,
1037 resource_size_t *size_max)
Allen Hubbee26a5842015-04-09 10:33:20 -04001038{
1039 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
Serge Semin443b9a12017-01-11 03:11:33 +03001040 resource_size_t bar_size, mw_size;
Allen Hubbee26a5842015-04-09 10:33:20 -04001041 int bar;
1042
Serge Semin443b9a12017-01-11 03:11:33 +03001043 if (pidx != NTB_DEF_PEER_IDX)
1044 return -EINVAL;
1045
Allen Hubbee26a5842015-04-09 10:33:20 -04001046 if (idx >= ndev->b2b_idx && !ndev->b2b_off)
1047 idx += 1;
1048
1049 bar = ndev_mw_to_bar(ndev, idx);
1050 if (bar < 0)
1051 return bar;
1052
Serge Semin443b9a12017-01-11 03:11:33 +03001053 bar_size = pci_resource_len(ndev->ntb.pdev, bar);
Allen Hubbee26a5842015-04-09 10:33:20 -04001054
Serge Semin443b9a12017-01-11 03:11:33 +03001055 if (idx == ndev->b2b_idx)
1056 mw_size = bar_size - ndev->b2b_off;
1057 else
1058 mw_size = bar_size;
Allen Hubbee26a5842015-04-09 10:33:20 -04001059
Serge Semin443b9a12017-01-11 03:11:33 +03001060 if (addr_align)
1061 *addr_align = pci_resource_len(ndev->ntb.pdev, bar);
Allen Hubbee26a5842015-04-09 10:33:20 -04001062
Serge Semin443b9a12017-01-11 03:11:33 +03001063 if (size_align)
1064 *size_align = 1;
1065
1066 if (size_max)
1067 *size_max = mw_size;
Allen Hubbee26a5842015-04-09 10:33:20 -04001068
1069 return 0;
1070}
1071
/*
 * Program inbound memory window @idx to translate to local DMA address
 * @addr for @size bytes.  Every register write is read back and
 * verified; on mismatch the registers are rolled back and -EIO is
 * returned.  Returns 0 on success or -EINVAL on bad arguments.
 */
static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				  dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long base_reg, xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	/* only the default peer exists on this hardware */
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	/* indices at/above the fully-stolen b2b window shift up one bar */
	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	/* the b2b register window consumes the front of its bar */
	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

	if (bar < 4 || !ndev->bar4_split) {
		/* whole 64-bit bar: use 64-bit register accesses */
		base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite64(addr, mmio + xlat_reg);
		reg_val = ioread64(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite64(limit, mmio + limit_reg);
		reg_val = ioread64(mmio + limit_reg);
		if (reg_val != limit) {
			/* roll back both limit and translation */
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}
	} else {
		/* split bar addr range must all be 32 bit */
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite32(addr, mmio + xlat_reg);
		reg_val = ioread32(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite32(limit, mmio + limit_reg);
		reg_val = ioread32(mmio + limit_reg);
		if (reg_val != limit) {
			/* roll back both limit and translation */
			iowrite32(base, mmio + limit_reg);
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}
1172
Serge Semin4e8c11b2016-12-14 02:49:15 +03001173static u64 intel_ntb_link_is_up(struct ntb_dev *ntb,
Allen Hubbee26a5842015-04-09 10:33:20 -04001174 enum ntb_speed *speed,
1175 enum ntb_width *width)
1176{
1177 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1178
1179 if (ndev->reg->link_is_up(ndev)) {
1180 if (speed)
1181 *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
1182 if (width)
1183 *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
1184 return 1;
1185 } else {
1186 /* TODO MAYBE: is it possible to observe the link speed and
1187 * width while link is training? */
1188 if (speed)
1189 *speed = NTB_SPEED_NONE;
1190 if (width)
1191 *width = NTB_WIDTH_NONE;
1192 return 0;
1193 }
1194}
1195
1196static int intel_ntb_link_enable(struct ntb_dev *ntb,
1197 enum ntb_speed max_speed,
1198 enum ntb_width max_width)
1199{
1200 struct intel_ntb_dev *ndev;
1201 u32 ntb_ctl;
1202
1203 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1204
1205 if (ndev->ntb.topo == NTB_TOPO_SEC)
1206 return -EINVAL;
1207
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001208 dev_dbg(&ntb->pdev->dev,
Allen Hubbee26a5842015-04-09 10:33:20 -04001209 "Enabling link with max_speed %d max_width %d\n",
1210 max_speed, max_width);
1211 if (max_speed != NTB_SPEED_AUTO)
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001212 dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
Allen Hubbee26a5842015-04-09 10:33:20 -04001213 if (max_width != NTB_WIDTH_AUTO)
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001214 dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
Allen Hubbee26a5842015-04-09 10:33:20 -04001215
1216 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1217 ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
1218 ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
1219 ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
1220 if (ndev->bar4_split)
1221 ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
1222 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1223
1224 return 0;
1225}
1226
1227static int intel_ntb_link_disable(struct ntb_dev *ntb)
1228{
1229 struct intel_ntb_dev *ndev;
1230 u32 ntb_cntl;
1231
1232 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1233
1234 if (ndev->ntb.topo == NTB_TOPO_SEC)
1235 return -EINVAL;
1236
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001237 dev_dbg(&ntb->pdev->dev, "Disabling link\n");
Allen Hubbee26a5842015-04-09 10:33:20 -04001238
1239 /* Bring NTB link down */
1240 ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1241 ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
1242 ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
1243 if (ndev->bar4_split)
1244 ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
1245 ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
1246 iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
1247
1248 return 0;
1249}
1250
Serge Semin443b9a12017-01-11 03:11:33 +03001251static int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
1252{
1253 /* Numbers of inbound and outbound memory windows match */
1254 return ntb_ndev(ntb)->mw_count;
1255}
1256
1257static int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
1258 phys_addr_t *base, resource_size_t *size)
1259{
1260 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1261 int bar;
1262
1263 if (idx >= ndev->b2b_idx && !ndev->b2b_off)
1264 idx += 1;
1265
1266 bar = ndev_mw_to_bar(ndev, idx);
1267 if (bar < 0)
1268 return bar;
1269
1270 if (base)
1271 *base = pci_resource_start(ndev->ntb.pdev, bar) +
1272 (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
1273
1274 if (size)
1275 *size = pci_resource_len(ndev->ntb.pdev, bar) -
1276 (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
1277
1278 return 0;
1279}
1280
Allen Hubbee26a5842015-04-09 10:33:20 -04001281static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
1282{
1283 return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
1284}
1285
1286static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
1287{
1288 return ntb_ndev(ntb)->db_valid_mask;
1289}
1290
1291static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
1292{
1293 struct intel_ntb_dev *ndev;
1294
1295 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1296
1297 return ndev->db_vec_count;
1298}
1299
1300static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
1301{
1302 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1303
1304 if (db_vector < 0 || db_vector > ndev->db_vec_count)
1305 return 0;
1306
1307 return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
1308}
1309
1310static u64 intel_ntb_db_read(struct ntb_dev *ntb)
1311{
1312 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1313
1314 return ndev_db_read(ndev,
1315 ndev->self_mmio +
1316 ndev->self_reg->db_bell);
1317}
1318
1319static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
1320{
1321 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1322
1323 return ndev_db_write(ndev, db_bits,
1324 ndev->self_mmio +
1325 ndev->self_reg->db_bell);
1326}
1327
1328static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
1329{
1330 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1331
1332 return ndev_db_set_mask(ndev, db_bits,
1333 ndev->self_mmio +
1334 ndev->self_reg->db_mask);
1335}
1336
1337static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1338{
1339 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1340
1341 return ndev_db_clear_mask(ndev, db_bits,
1342 ndev->self_mmio +
1343 ndev->self_reg->db_mask);
1344}
1345
/*
 * Report the physical address and access size of the peer doorbell
 * register, for clients that ring doorbells by direct write.
 */
static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
				  phys_addr_t *db_addr,
				  resource_size_t *db_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
			    ndev->peer_reg->db_bell);
}
1355
1356static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1357{
1358 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1359
1360 return ndev_db_write(ndev, db_bits,
1361 ndev->peer_mmio +
1362 ndev->peer_reg->db_bell);
1363}
1364
1365static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
1366{
1367 return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
1368}
1369
1370static int intel_ntb_spad_count(struct ntb_dev *ntb)
1371{
1372 struct intel_ntb_dev *ndev;
1373
1374 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1375
1376 return ndev->spad_count;
1377}
1378
1379static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
1380{
1381 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1382
1383 return ndev_spad_read(ndev, idx,
1384 ndev->self_mmio +
1385 ndev->self_reg->spad);
1386}
1387
1388static int intel_ntb_spad_write(struct ntb_dev *ntb,
1389 int idx, u32 val)
1390{
1391 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1392
1393 return ndev_spad_write(ndev, idx, val,
1394 ndev->self_mmio +
1395 ndev->self_reg->spad);
1396}
1397
/*
 * Report the physical address of peer scratchpad @sidx for direct
 * remote access.  NOTE(review): @pidx is not validated here, unlike
 * intel_ntb_mw_count() and friends -- presumably only the default peer
 * exists; confirm against the ntb core's contract.
 */
static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
				    phys_addr_t *spad_addr)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
			      ndev->peer_reg->spad);
}
1406
Serge Semind67288a2017-01-11 03:13:20 +03001407static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
Allen Hubbee26a5842015-04-09 10:33:20 -04001408{
1409 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1410
Serge Semind67288a2017-01-11 03:13:20 +03001411 return ndev_spad_read(ndev, sidx,
Allen Hubbee26a5842015-04-09 10:33:20 -04001412 ndev->peer_mmio +
1413 ndev->peer_reg->spad);
1414}
1415
Serge Semind67288a2017-01-11 03:13:20 +03001416static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
1417 int sidx, u32 val)
Allen Hubbee26a5842015-04-09 10:33:20 -04001418{
1419 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1420
Serge Semind67288a2017-01-11 03:13:20 +03001421 return ndev_spad_write(ndev, sidx, val,
Allen Hubbee26a5842015-04-09 10:33:20 -04001422 ndev->peer_mmio +
1423 ndev->peer_reg->spad);
1424}
1425
Dave Jiang783dfa62016-11-16 14:03:38 -07001426/* Skylake Xeon NTB */
1427
Dave Jiang939ada52017-02-16 16:22:36 -07001428static int skx_poll_link(struct intel_ntb_dev *ndev)
1429{
1430 u16 reg_val;
1431 int rc;
1432
1433 ndev->reg->db_iowrite(ndev->db_link_mask,
1434 ndev->self_mmio +
1435 ndev->self_reg->db_clear);
1436
1437 rc = pci_read_config_word(ndev->ntb.pdev,
1438 SKX_LINK_STATUS_OFFSET, &reg_val);
1439 if (rc)
1440 return 0;
1441
1442 if (reg_val == ndev->lnk_sta)
1443 return 0;
1444
1445 ndev->lnk_sta = reg_val;
1446
1447 return 1;
1448}
1449
/* Skylake doorbell registers are accessed as full 64-bit reads. */
static u64 skx_db_ioread(void __iomem *mmio)
{
	return ioread64(mmio);
}
1454
/* Skylake doorbell registers are accessed as full 64-bit writes. */
static void skx_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite64(bits, mmio);
}
1459
/*
 * Remap the doorbell MSIX vectors so interrupt status bits and vectors
 * line up, apply the vector-32 erratum workaround if flagged, then
 * register the interrupt handlers via ndev_init_isr().
 */
static int skx_init_isr(struct intel_ntb_dev *ndev)
{
	int i;

	/*
	 * The MSIX vectors and the interrupt status bits are not lined up
	 * on Skylake. By default the link status bit is bit 32, however it
	 * is by default MSIX vector0. We need to fixup to line them up.
	 * The vectors at reset is 1-32,0. We need to reprogram to 0-32.
	 */

	for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++)
		iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i);

	/* move link status down one as workaround */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
		iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2,
			 ndev->self_mmio + SKX_INTVEC_OFFSET +
			 (SKX_DB_MSIX_VECTOR_COUNT - 1));
	}

	return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT,
			     SKX_DB_MSIX_VECTOR_COUNT,
			     SKX_DB_MSIX_VECTOR_SHIFT,
			     SKX_DB_TOTAL_SHIFT);
}
1486
/*
 * Prepare the b2b inbound windows on Skylake: set each window's limit
 * equal to its base (a zero-length, effectively disabled window) and
 * clear the translation addresses, until a client programs real
 * translations later.  NOTE(review): only @addr is consumed here;
 * @peer_addr appears unused -- confirm whether that is intentional.
 */
static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
			    const struct intel_b2b_addr *addr,
			    const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	phys_addr_t bar_addr;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	/* setup incoming bar limits == base addrs (zero length windows) */
	bar_addr = addr->bar2_addr64;
	iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
	bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);

	bar_addr = addr->bar4_addr64;
	iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
	bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);

	/* zero incoming translation addrs */
	iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
	iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET);

	/* in b2b the peer registers are reached through our own mapping */
	ndev->peer_mmio = ndev->self_mmio;

	return 0;
}
1517
/*
 * One-time NTB setup for Skylake: record window/scratchpad/doorbell
 * counts, select the topology-specific register sets, set up the b2b
 * windows, and mask all doorbells.  Only b2b topologies are supported.
 */
static int skx_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;


	ndev->mw_count = XEON_MW_COUNT;
	ndev->spad_count = SKX_SPAD_COUNT;
	ndev->db_count = SKX_DB_COUNT;
	ndev->db_link_mask = SKX_DB_LINK_BIT;

	/* DB fixup for using 31 right now */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
		ndev->db_link_mask |= BIT_ULL(31);

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &skx_pri_reg;
		ndev->peer_reg = &skx_b2b_reg;
		ndev->xlat_reg = &skx_sec_xlat;

		/* upstream and downstream sides use mirrored address pairs */
		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = skx_setup_b2b_mw(ndev,
					      &xeon_b2b_dsd_addr,
					      &xeon_b2b_usd_addr);
		} else {
			rc = skx_setup_b2b_mw(ndev,
					      &xeon_b2b_usd_addr,
					      &xeon_b2b_dsd_addr);
		}

		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + SKX_SPCICMD_OFFSET);

		break;

	default:
		/* non-b2b topologies are not supported on this hardware */
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	/* mask all doorbells until a client unmasks the ones it uses */
	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}
1570
/*
 * Per-device initialization for Skylake Xeon: read the topology from
 * the PPD config register, record errata flags, then run the NTB and
 * interrupt setup.  Returns 0 or a negative errno.
 */
static int skx_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc;

	pdev = ndev->ntb.pdev;

	ndev->reg = &skx_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	/* flag the MSIX vector 32 erratum; skx_init_isr applies the fix */
	if (pdev_is_skx_xeon(pdev))
		ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;

	rc = skx_init_ntb(ndev);
	if (rc)
		return rc;

	return skx_init_isr(ndev);
}
1600
1601static int intel_ntb3_link_enable(struct ntb_dev *ntb,
1602 enum ntb_speed max_speed,
1603 enum ntb_width max_width)
1604{
1605 struct intel_ntb_dev *ndev;
1606 u32 ntb_ctl;
1607
1608 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1609
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001610 dev_dbg(&ntb->pdev->dev,
Dave Jiang783dfa62016-11-16 14:03:38 -07001611 "Enabling link with max_speed %d max_width %d\n",
1612 max_speed, max_width);
1613
1614 if (max_speed != NTB_SPEED_AUTO)
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001615 dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
Dave Jiang783dfa62016-11-16 14:03:38 -07001616 if (max_width != NTB_WIDTH_AUTO)
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001617 dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
Dave Jiang783dfa62016-11-16 14:03:38 -07001618
1619 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1620 ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
1621 ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
1622 ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
1623 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1624
1625 return 0;
1626}
/*
 * Program gen3 inbound window @idx to translate to local DMA address
 * @addr for @size bytes, then mirror the limit into the EP-side
 * register bank.  All writes are read back and verified; on mismatch
 * the registers are rolled back and -EIO is returned.
 */
static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				   dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	/* only the default peer exists on this hardware */
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	/* indices at/above the fully-stolen b2b window shift up one bar */
	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	/* gen3 window registers are laid out at a 0x10 stride per window */
	mmio = ndev->self_mmio;
	xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
	base = pci_resource_start(ndev->ntb.pdev, bar);

	/* Set the limit if supported, if size is not mw_size */
	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the translation address */
	iowrite64(addr, mmio + xlat_reg);
	reg_val = ioread64(mmio + xlat_reg);
	if (reg_val != addr) {
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		/* roll back both limit and translation */
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);

	/* setup the EP */
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
	base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx));
	base &= ~0xf;

	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		/* roll back the EP limit and the translation */
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);

	return 0;
}
1717
1718static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1719{
1720 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1721 int bit;
1722
1723 if (db_bits & ~ndev->db_valid_mask)
1724 return -EINVAL;
1725
1726 while (db_bits) {
1727 bit = __ffs(db_bits);
1728 iowrite32(1, ndev->peer_mmio +
1729 ndev->peer_reg->db_bell + (bit * 4));
1730 db_bits &= db_bits - 1;
1731 }
1732
1733 return 0;
1734}
1735
1736static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
1737{
1738 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1739
1740 return ndev_db_read(ndev,
1741 ndev->self_mmio +
1742 ndev->self_reg->db_clear);
1743}
1744
1745static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
1746{
1747 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1748
1749 return ndev_db_write(ndev, db_bits,
1750 ndev->self_mmio +
1751 ndev->self_reg->db_clear);
1752}
1753
Dave Jiang2f887b92015-05-20 12:55:47 -04001754/* XEON */
Allen Hubbee26a5842015-04-09 10:33:20 -04001755
/* Xeon doorbell registers are 16 bits wide; widen to u64 for the core. */
static u64 xeon_db_ioread(void __iomem *mmio)
{
	return (u64)ioread16(mmio);
}
1760
/* Xeon doorbell registers are 16 bits wide; truncate the u64 on write. */
static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite16((u16)bits, mmio);
}
1765
Dave Jiang2f887b92015-05-20 12:55:47 -04001766static int xeon_poll_link(struct intel_ntb_dev *ndev)
Allen Hubbee26a5842015-04-09 10:33:20 -04001767{
1768 u16 reg_val;
1769 int rc;
1770
1771 ndev->reg->db_iowrite(ndev->db_link_mask,
1772 ndev->self_mmio +
1773 ndev->self_reg->db_bell);
1774
1775 rc = pci_read_config_word(ndev->ntb.pdev,
Dave Jiang2f887b92015-05-20 12:55:47 -04001776 XEON_LINK_STATUS_OFFSET, &reg_val);
Allen Hubbee26a5842015-04-09 10:33:20 -04001777 if (rc)
1778 return 0;
1779
1780 if (reg_val == ndev->lnk_sta)
1781 return 0;
1782
1783 ndev->lnk_sta = reg_val;
1784
1785 return 1;
1786}
1787
Dave Jiang2f887b92015-05-20 12:55:47 -04001788static int xeon_link_is_up(struct intel_ntb_dev *ndev)
Allen Hubbee26a5842015-04-09 10:33:20 -04001789{
Dave Jiang5ae0beb2015-05-19 16:59:34 -04001790 if (ndev->ntb.topo == NTB_TOPO_SEC)
1791 return 1;
1792
Allen Hubbee26a5842015-04-09 10:33:20 -04001793 return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
1794}
1795
Dave Jiang2f887b92015-05-20 12:55:47 -04001796static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
Allen Hubbee26a5842015-04-09 10:33:20 -04001797{
Dave Jiang2f887b92015-05-20 12:55:47 -04001798 switch (ppd & XEON_PPD_TOPO_MASK) {
1799 case XEON_PPD_TOPO_B2B_USD:
Allen Hubbee26a5842015-04-09 10:33:20 -04001800 return NTB_TOPO_B2B_USD;
1801
Dave Jiang2f887b92015-05-20 12:55:47 -04001802 case XEON_PPD_TOPO_B2B_DSD:
Allen Hubbee26a5842015-04-09 10:33:20 -04001803 return NTB_TOPO_B2B_DSD;
1804
Dave Jiang2f887b92015-05-20 12:55:47 -04001805 case XEON_PPD_TOPO_PRI_USD:
1806 case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
Allen Hubbee26a5842015-04-09 10:33:20 -04001807 return NTB_TOPO_PRI;
1808
Dave Jiang2f887b92015-05-20 12:55:47 -04001809 case XEON_PPD_TOPO_SEC_USD:
1810 case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
Allen Hubbee26a5842015-04-09 10:33:20 -04001811 return NTB_TOPO_SEC;
1812 }
1813
1814 return NTB_TOPO_NONE;
1815}
1816
Dave Jiang2f887b92015-05-20 12:55:47 -04001817static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
Allen Hubbee26a5842015-04-09 10:33:20 -04001818{
Dave Jiang2f887b92015-05-20 12:55:47 -04001819 if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001820 dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
Allen Hubbee26a5842015-04-09 10:33:20 -04001821 return 1;
1822 }
1823 return 0;
1824}
1825
/* Set up interrupt handling using the Xeon MSI-X vector/doorbell layout. */
static int xeon_init_isr(struct intel_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_SHIFT,
			     XEON_DB_TOTAL_SHIFT);
}
1833
/* Tear down interrupt handling set up by xeon_init_isr(). */
static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
{
	ndev_deinit_isr(ndev);
}
1838
/*
 * Configure the back-to-back (b2b) memory window used to reach the peer's
 * NTB registers, then program the secondary BAR sizes/bases, the incoming
 * limit/translation registers, and the outgoing translation registers.
 *
 * @addr:      local-side b2b address layout
 * @peer_addr: peer-side b2b address layout
 *
 * Returns 0 on success, -EIO on invalid b2b configuration or iomap failure.
 * NOTE: the register accesses below are order-sensitive; readbacks after
 * writes are deliberate (debug visibility and posted-write flushing).
 */
static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx == UINT_MAX) {
		/* No memory window is reserved for b2b register access. */
		dev_dbg(&pdev->dev, "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);

		/*
		 * Either share the bar (first half for b2b, second half
		 * still usable as a memory window), or consume the whole
		 * bar for b2b and drop one memory window.
		 */
		if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
			dev_dbg(&pdev->dev, "b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (XEON_B2B_MIN_SIZE <= bar_size) {
			dev_dbg(&pdev->dev, "b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(&pdev->dev, "b2b bar size is too small\n");
			return -EIO;
		}
	}

	/* Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.
	 *
	 * Note: code for each specific bar size register, because the register
	 * offsets are not in a consistent order (bar5sz comes after ppd, odd).
	 */
	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		/* bar size registers are log2; -1 halves, 0 disables */
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}
	pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);

	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);

		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
		if (b2b_bar == 5) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
	}

	/* SBAR01 hit by first part of the b2b bar */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = addr->bar5_addr32;
	else
		return -EIO;

	dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
	iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);

	/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
	 * The b2b bar is either disabled above, or configured half-size, and
	 * it starts at the PBAR xlat + offset.
	 */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
	dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
	}

	/* setup incoming bar limits == base addrs (zero length windows) */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
	dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
	}

	/* zero incoming translation addrs */
	iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
	}

	/* zero outgoing translation limits (whole bar size windows) */
	iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
	}

	/* set outgoing translation offsets */
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
	dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
	}

	/* set the translation offset for b2b registers */
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

	/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
	dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
		/* map peer ntb mmio config space registers */
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    XEON_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;

		ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
	}

	return 0;
}
2084
/*
 * Configure counts (memory windows, scratchpads, doorbells) and select the
 * register sets appropriate for the detected topology (PRI / SEC / B2B),
 * working around hardware errata recorded in ndev->hwerr_flags.
 *
 * Returns 0 on success, -EINVAL for unusable topology/configuration, or an
 * error from xeon_setup_b2b_mw().
 */
static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{
	struct device *dev = &ndev->ntb.pdev->dev;
	int rc;
	u32 ntb_ctl;

	if (ndev->bar4_split)
		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
	else
		ndev->mw_count = XEON_MW_COUNT;

	ndev->spad_count = XEON_SPAD_COUNT;
	ndev->db_count = XEON_DB_COUNT;
	ndev->db_link_mask = XEON_DB_LINK_BIT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
		/* The SDOORBELL errata makes non-B2B primary mode unusable. */
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(dev, "NTB Primary config disabled\n");
			return -EINVAL;
		}

		/* enable link to allow secondary side device to appear */
		ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
		ntb_ctl &= ~NTB_CTL_DISABLE;
		iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_sec_reg;
		ndev->xlat_reg = &xeon_sec_xlat;
		break;

	case NTB_TOPO_SEC:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(dev, "NTB Secondary config disabled\n");
			return -EINVAL;
		}
		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_sec_reg;
		ndev->peer_reg = &xeon_pri_reg;
		ndev->xlat_reg = &xeon_pri_xlat;
		break;

	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_b2b_reg;
		ndev->xlat_reg = &xeon_sec_xlat;

		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			/*
			 * Errata workaround: reach the peer's registers
			 * through a dedicated memory window instead of the
			 * (lockup-prone) b2b doorbell/scratchpad registers.
			 */
			ndev->peer_reg = &xeon_pri_reg;

			/* negative b2b_mw_idx counts from the last window */
			if (b2b_mw_idx < 0)
				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
			else
				ndev->b2b_idx = b2b_mw_idx;

			if (ndev->b2b_idx >= ndev->mw_count) {
				dev_dbg(dev,
					"b2b_mw_idx %d invalid for mw_count %u\n",
					b2b_mw_idx, ndev->mw_count);
				return -EINVAL;
			}

			dev_dbg(dev, "setting up b2b mw idx %d means %d\n",
				b2b_mw_idx, ndev->b2b_idx);

		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
			dev_warn(dev, "Reduce doorbell count by 1\n");
			ndev->db_count -= 1;
		}

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_dsd_addr,
					       &xeon_b2b_usd_addr);
		} else {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_usd_addr,
					       &xeon_b2b_dsd_addr);
		}
		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + XEON_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	/* mask all doorbells until a client unmasks what it needs */
	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}
2190
/*
 * Device-level init for Xeon NTB: record per-device errata flags, read the
 * PPD register to determine topology and split-BAR mode, then finish NTB
 * setup and register interrupts.
 *
 * Returns 0 on success or a negative errno.
 */
static int xeon_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc, mem;

	pdev = ndev->ntb.pdev;

	switch (pdev->device) {
	/* There is a Xeon hardware errata related to writes to SDOORBELL or
	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
	 * which may hang the system. To workaround this use the second memory
	 * window to access the interrupt and scratch pad registers on the
	 * remote system.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* There is a hardware errata related to accessing any register in
	 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* HW Errata on bit 14 of b2bdoorbell register. Writes will not be
	 * mirrored to the remote system. Shrink the number of bits by one,
	 * since bit 14 is the last bit.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
		break;
	}

	ndev->reg = &xeon_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
		dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
			ppd, ndev->bar4_split);
	} else {
		/* This is a way for transparent BAR to figure out if we are
		 * doing split BAR or not. There is no way for the hw on the
		 * transparent side to know and set the PPD.
		 */
		mem = pci_select_bars(pdev, IORESOURCE_MEM);
		ndev->bar4_split = hweight32(mem) ==
			HSX_SPLIT_BAR_MW_COUNT + 1;
		dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
			mem, ndev->bar4_split);
	}

	rc = xeon_init_ntb(ndev);
	if (rc)
		return rc;

	return xeon_init_isr(ndev);
}
2300
/* Device-level teardown for Xeon NTB; currently only releases interrupts. */
static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
{
	xeon_deinit_isr(ndev);
}
2305
/*
 * Common PCI bring-up: enable the device, claim its regions, configure DMA
 * masks (64-bit preferred, 32-bit fallback), and iomap BAR0 for register
 * access. Until a b2b window is mapped, the peer registers alias our own
 * (peer_mmio == self_mmio).
 *
 * On failure, the goto ladder unwinds exactly the steps already completed.
 * Returns 0 on success or a negative errno.
 */
static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	/* prefer 64-bit DMA; fall back to 32-bit with a warning */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
	}
	/* propagate the PCI device's DMA mask to the ntb device */
	rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
					  dma_get_mask(&pdev->dev));
	if (rc)
		goto err_dma_mask;

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_mmio;
	}
	ndev->peer_mmio = ndev->self_mmio;
	ndev->peer_addr = pci_resource_start(pdev, 0);

	return 0;

err_mmio:
err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}
2362
/*
 * Undo intel_ntb_init_pci(): unmap the MMIO regions (the peer mapping only
 * if it is distinct from our own), then release and disable the PCI device.
 */
static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;

	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
		pci_iounmap(pdev, ndev->peer_mmio);
	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
2376
2377static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
2378 struct pci_dev *pdev)
2379{
2380 ndev->ntb.pdev = pdev;
2381 ndev->ntb.topo = NTB_TOPO_NONE;
2382 ndev->ntb.ops = &intel_ntb_ops;
2383
2384 ndev->b2b_off = 0;
Allen Hubbe2aa2a772015-08-31 09:30:59 -04002385 ndev->b2b_idx = UINT_MAX;
Allen Hubbee26a5842015-04-09 10:33:20 -04002386
2387 ndev->bar4_split = 0;
2388
2389 ndev->mw_count = 0;
2390 ndev->spad_count = 0;
2391 ndev->db_count = 0;
2392 ndev->db_vec_count = 0;
2393 ndev->db_vec_shift = 0;
2394
2395 ndev->ntb_ctl = 0;
2396 ndev->lnk_sta = 0;
2397
2398 ndev->db_valid_mask = 0;
2399 ndev->db_link_mask = 0;
2400 ndev->db_mask = 0;
2401
2402 spin_lock_init(&ndev->db_mask_lock);
2403}
2404
/*
 * PCI probe entry point: allocate the device structure on the local NUMA
 * node, run common PCI setup, then the generation-specific init (classic
 * Xeon vs. Skylake Xeon), and finally register with the NTB core.
 *
 * Error paths unwind in reverse order of the steps completed.
 * Returns 0 on success or a negative errno.
 */
static int intel_ntb_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct intel_ntb_dev *ndev;
	int rc, node;

	/* keep driver data local to the device's NUMA node */
	node = dev_to_node(&pdev->dev);

	if (pdev_is_xeon(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = xeon_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_skx_xeon(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);
		/* Skylake uses the gen3 ops instead of the defaults */
		ndev->ntb.ops = &intel_ntb3_ops;

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = skx_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else {
		rc = -EINVAL;
		goto err_ndev;
	}

	ndev_reset_unsafe_flags(ndev);

	ndev->reg->poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
		xeon_deinit_dev(ndev);
err_init_dev:
	intel_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}
2478
/* PCI remove entry point; teardown mirrors the probe order in reverse. */
static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
		xeon_deinit_dev(ndev);
	intel_ntb_deinit_pci(ndev);
	kfree(ndev);
}
2490
/* Hardware ops and common register layout for classic Xeon NTB. */
static const struct intel_ntb_reg xeon_reg = {
	.poll_link = xeon_poll_link,
	.link_is_up = xeon_link_is_up,
	.db_ioread = xeon_db_ioread,
	.db_iowrite = xeon_db_iowrite,
	.db_size = sizeof(u32),
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.mw_bar = {2, 4, 5},
};
2500
/* Primary-side doorbell and scratchpad register offsets (Xeon). */
static const struct intel_ntb_alt_reg xeon_pri_reg = {
	.db_bell		= XEON_PDOORBELL_OFFSET,
	.db_mask		= XEON_PDBMSK_OFFSET,
	.spad			= XEON_SPAD_OFFSET,
};
2506
/* Secondary-side doorbell and scratchpad register offsets (Xeon). */
static const struct intel_ntb_alt_reg xeon_sec_reg = {
	.db_bell		= XEON_SDOORBELL_OFFSET,
	.db_mask		= XEON_SDBMSK_OFFSET,
	/* second half of the scratchpads */
	.spad			= XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
};
2513
/* Back-to-back (B2B) doorbell and scratchpad register offsets (Xeon).
 * No .db_mask here: NOTE(review) the B2B window apparently exposes no
 * doorbell mask register — confirm against the hardware spec.
 */
static const struct intel_ntb_alt_reg xeon_b2b_reg = {
	.db_bell		= XEON_B2B_DOORBELL_OFFSET,
	.spad			= XEON_B2B_SPAD_OFFSET,
};
2518
/* Primary-side translation (limit/xlat) register offsets (Xeon). */
static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
	/* Note: no primary .bar0_base visible to the secondary side.
	 *
	 * The secondary side cannot get the base address stored in primary
	 * bars. The base address is necessary to set the limit register to
	 * any value other than zero, or unlimited.
	 *
	 * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
	 * window by setting the limit equal to base, nor can it limit the size
	 * of the memory window by setting the limit to base + size.
	 */
	.bar2_limit		= XEON_PBAR23LMT_OFFSET,
	.bar2_xlat		= XEON_PBAR23XLAT_OFFSET,
};
2533
/* Secondary-side translation register offsets (Xeon); unlike the primary
 * side, a .bar0_base is available here (see comment on xeon_pri_xlat).
 */
static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base		= XEON_SBAR0BASE_OFFSET,
	.bar2_limit		= XEON_SBAR23LMT_OFFSET,
	.bar2_xlat		= XEON_SBAR23XLAT_OFFSET,
};
2539
/* Default B2B peer addresses, "usd" side (presumably upstream —
 * TODO confirm). Deliberately not const: NOTE(review) looks like these
 * defaults are meant to be overridable (e.g. by module parameters) —
 * verify against the parameter-handling code earlier in this file.
 */
static struct intel_b2b_addr xeon_b2b_usd_addr = {
	.bar2_addr64		= XEON_B2B_BAR2_ADDR64,
	.bar4_addr64		= XEON_B2B_BAR4_ADDR64,
	.bar4_addr32		= XEON_B2B_BAR4_ADDR32,
	.bar5_addr32		= XEON_B2B_BAR5_ADDR32,
};
2546
/* Default B2B peer addresses, "dsd" side (presumably downstream —
 * TODO confirm); same default values as the usd table. Not const for the
 * same reason as xeon_b2b_usd_addr (apparently runtime-overridable).
 */
static struct intel_b2b_addr xeon_b2b_dsd_addr = {
	.bar2_addr64		= XEON_B2B_BAR2_ADDR64,
	.bar4_addr64		= XEON_B2B_BAR4_ADDR64,
	.bar4_addr32		= XEON_B2B_BAR4_ADDR32,
	.bar5_addr32		= XEON_B2B_BAR5_ADDR32,
};
2553
/* Register accessors and layout for Skylake Xeon (gen3) hardware. */
static const struct intel_ntb_reg skx_reg = {
	.poll_link		= skx_poll_link,
	.link_is_up		= xeon_link_is_up,	/* link-up test shared with gen1 */
	.db_ioread		= skx_db_ioread,
	.db_iowrite		= skx_db_iowrite,
	.db_size		= sizeof(u32),	/* doorbell register width */
	.ntb_ctl		= SKX_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4},	/* one fewer MW BAR than gen1 */
};
2563
/* Local-side doorbell/scratchpad offsets for Skylake: ring the peer via
 * the EM doorbell, clear/mask interrupts through the IM registers.
 */
static const struct intel_ntb_alt_reg skx_pri_reg = {
	.db_bell		= SKX_EM_DOORBELL_OFFSET,
	.db_clear		= SKX_IM_INT_STATUS_OFFSET,
	.db_mask		= SKX_IM_INT_DISABLE_OFFSET,
	.spad			= SKX_IM_SPAD_OFFSET,
};
2570
/* Peer-side (B2B) doorbell/scratchpad offsets for Skylake: the mirror of
 * skx_pri_reg, with the IM/EM register roles swapped.
 */
static const struct intel_ntb_alt_reg skx_b2b_reg = {
	.db_bell		= SKX_IM_DOORBELL_OFFSET,
	.db_clear		= SKX_EM_INT_STATUS_OFFSET,
	.db_mask		= SKX_EM_INT_DISABLE_OFFSET,
	.spad			= SKX_B2B_SPAD_OFFSET,
};
2577
/* Secondary-side translation register offsets for Skylake (gen3). */
static const struct intel_ntb_xlat_reg skx_sec_xlat = {
/*	.bar0_base		= SKX_EMBAR0_OFFSET, */
	.bar2_limit		= SKX_IMBAR1XLMT_OFFSET,
	.bar2_xlat		= SKX_IMBAR1XBASE_OFFSET,
};
2583
Allen Hubbee26a5842015-04-09 10:33:20 -04002584/* operations for primary side of local ntb */
2585static const struct ntb_dev_ops intel_ntb_ops = {
2586 .mw_count = intel_ntb_mw_count,
Serge Semin443b9a12017-01-11 03:11:33 +03002587 .mw_get_align = intel_ntb_mw_get_align,
Allen Hubbee26a5842015-04-09 10:33:20 -04002588 .mw_set_trans = intel_ntb_mw_set_trans,
Serge Semin443b9a12017-01-11 03:11:33 +03002589 .peer_mw_count = intel_ntb_peer_mw_count,
2590 .peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
Allen Hubbee26a5842015-04-09 10:33:20 -04002591 .link_is_up = intel_ntb_link_is_up,
2592 .link_enable = intel_ntb_link_enable,
2593 .link_disable = intel_ntb_link_disable,
2594 .db_is_unsafe = intel_ntb_db_is_unsafe,
2595 .db_valid_mask = intel_ntb_db_valid_mask,
2596 .db_vector_count = intel_ntb_db_vector_count,
2597 .db_vector_mask = intel_ntb_db_vector_mask,
2598 .db_read = intel_ntb_db_read,
2599 .db_clear = intel_ntb_db_clear,
2600 .db_set_mask = intel_ntb_db_set_mask,
2601 .db_clear_mask = intel_ntb_db_clear_mask,
2602 .peer_db_addr = intel_ntb_peer_db_addr,
2603 .peer_db_set = intel_ntb_peer_db_set,
2604 .spad_is_unsafe = intel_ntb_spad_is_unsafe,
2605 .spad_count = intel_ntb_spad_count,
2606 .spad_read = intel_ntb_spad_read,
2607 .spad_write = intel_ntb_spad_write,
2608 .peer_spad_addr = intel_ntb_peer_spad_addr,
2609 .peer_spad_read = intel_ntb_peer_spad_read,
2610 .peer_spad_write = intel_ntb_peer_spad_write,
2611};
2612
/* Gen3 (Skylake) ntb_dev_ops table, installed for pdev_is_skx_xeon()
 * devices in the probe path. Mostly shared with intel_ntb_ops; the
 * intel_ntb3_* entries are the gen3-specific overrides. Note there is
 * no .db_is_unsafe here, unlike the gen1 table.
 */
static const struct ntb_dev_ops intel_ntb3_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_align		= intel_ntb_mw_get_align,
	.mw_set_trans		= intel_ntb3_mw_set_trans,
	.peer_mw_count		= intel_ntb_peer_mw_count,
	.peer_mw_get_addr	= intel_ntb_peer_mw_get_addr,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb3_link_enable,
	.link_disable		= intel_ntb_link_disable,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb3_db_read,
	.db_clear		= intel_ntb3_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb_peer_db_addr,
	.peer_db_set		= intel_ntb3_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};
2639
Allen Hubbee26a5842015-04-09 10:33:20 -04002640static const struct file_operations intel_ntb_debugfs_info = {
2641 .owner = THIS_MODULE,
2642 .open = simple_open,
2643 .read = ndev_debugfs_read,
2644};
2645
/* Supported PCI device IDs: B2B, PS and SS topology variants of the
 * JSF/SNB/IVT/HSX/BDX parts, plus the B2B-only Skylake (SKX) part.
 * The zero entry terminates the table.
 */
static const struct pci_device_id intel_ntb_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
Jon Masonfce8a7b2012-11-16 19:27:12 -07002666
Allen Hubbee26a5842015-04-09 10:33:20 -04002667static struct pci_driver intel_ntb_pci_driver = {
2668 .name = KBUILD_MODNAME,
2669 .id_table = intel_ntb_pci_tbl,
2670 .probe = intel_ntb_pci_probe,
2671 .remove = intel_ntb_pci_remove,
Jon Mason6465d022014-04-07 10:55:47 -07002672};
2673
Allen Hubbee26a5842015-04-09 10:33:20 -04002674static int __init intel_ntb_pci_driver_init(void)
Jon Mason1517a3f2013-07-30 15:58:49 -07002675{
Dave Jiang7eb38782015-06-15 08:21:33 -04002676 pr_info("%s %s\n", NTB_DESC, NTB_VER);
2677
Allen Hubbee26a5842015-04-09 10:33:20 -04002678 if (debugfs_initialized())
Jon Mason1517a3f2013-07-30 15:58:49 -07002679 debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2680
Allen Hubbee26a5842015-04-09 10:33:20 -04002681 return pci_register_driver(&intel_ntb_pci_driver);
Jon Mason1517a3f2013-07-30 15:58:49 -07002682}
Allen Hubbee26a5842015-04-09 10:33:20 -04002683module_init(intel_ntb_pci_driver_init);
Jon Mason1517a3f2013-07-30 15:58:49 -07002684
Allen Hubbee26a5842015-04-09 10:33:20 -04002685static void __exit intel_ntb_pci_driver_exit(void)
Jon Mason1517a3f2013-07-30 15:58:49 -07002686{
Allen Hubbee26a5842015-04-09 10:33:20 -04002687 pci_unregister_driver(&intel_ntb_pci_driver);
Jon Mason1517a3f2013-07-30 15:58:49 -07002688
Allen Hubbee26a5842015-04-09 10:33:20 -04002689 debugfs_remove_recursive(debugfs_dir);
Jon Mason1517a3f2013-07-30 15:58:49 -07002690}
Allen Hubbee26a5842015-04-09 10:33:20 -04002691module_exit(intel_ntb_pci_driver_exit);