/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME	"ntb_hw_intel"
#define NTB_DESC	"Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

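/*
 * The per-BAR register blocks are arrays of 32-bit slots, so a BAR index
 * becomes a byte offset by shifting left by 2. Registers for BAR 2 and up
 * are packed starting at slot 0 of their block, hence bar2_off() rebases
 * the index before reusing bar0_off().
 */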
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

static const struct intel_ntb_reg atom_reg;
static const struct intel_ntb_alt_reg atom_pri_reg;
static const struct intel_ntb_alt_reg atom_sec_reg;
static const struct intel_ntb_alt_reg atom_b2b_reg;
static const struct intel_ntb_xlat_reg atom_pri_xlat;
static const struct intel_ntb_xlat_reg atom_sec_xlat;
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
static const struct intel_ntb_reg skx_reg;
static const struct intel_ntb_alt_reg skx_pri_reg;
static const struct intel_ntb_alt_reg skx_b2b_reg;
static const struct intel_ntb_xlat_reg skx_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;
static const struct ntb_dev_ops intel_ntb3_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
		 "value of zero or positive starts from first mw idx, and a "
		 "negative value starts from last mw idx. Both sides MUST "
		 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
		 "ntb so that the peer ntb only occupies the first half of "
		 "the mw, so the second half can still be used as a mw. Both "
		 "sides MUST set the same value here!");

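/*
 * Example (hypothetical values): both hosts of a back-to-back pair must be
 * loaded with matching settings, e.g.:
 *
 *   modprobe ntb_hw_intel b2b_mw_idx=-1 b2b_mw_share=1
 *
 * which devotes the last memory window to peer access and splits it so the
 * second half remains available as an ordinary memory window.
 */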
module_param_named(xeon_b2b_usd_bar2_addr64,
		   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
		 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
		   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
		 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
		   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
		 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
		   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
		 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
		   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
		 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
		   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
		 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
		   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
		 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
		   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
		 "XEON B2B DSD split-BAR 5 32-bit address");

static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
static int xeon_init_isr(struct intel_ntb_dev *ndev);

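/*
 * Fallback 64-bit MMIO accessors for architectures without readq/writeq.
 * Note the split forms below are not atomic: the low and high halves are
 * transferred as two independent 32-bit accesses, low dword first.
 */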
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif

static inline int pdev_is_atom(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
		return 1;
	}
	return 0;
}

static inline int pdev_is_xeon(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		return 1;
	}
	return 0;
}

static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
		return 1;

	return 0;
}

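/*
 * Some hardware errata (flagged in ndev->hwerr_flags) make the doorbell or
 * scratchpad registers hazardous to use. The flags set here mark those
 * resources "unsafe"; clients may still opt in explicitly through
 * ndev_ignore_unsafe() below, at their own risk.
 */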
static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
	ndev->unsafe_flags = 0;
	ndev->unsafe_flags_ignore = 0;

	/* Only B2B has a workaround to avoid SDOORBELL */
	if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
		if (!ntb_topo_is_b2b(ndev->ntb.topo))
			ndev->unsafe_flags |= NTB_UNSAFE_DB;

	/* No low level workaround to avoid SB01BASE */
	if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
		ndev->unsafe_flags |= NTB_UNSAFE_DB;
		ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
	}
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
				 unsigned long flag)
{
	return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
				     unsigned long flag)
{
	flag &= ndev->unsafe_flags;
	ndev->unsafe_flags_ignore |= flag;

	return !!flag;
}

static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx >= ndev->mw_count)
		return -EINVAL;
	return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
			       phys_addr_t *db_addr, resource_size_t *db_size,
			       phys_addr_t reg_addr, unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_addr) {
		*db_addr = reg_addr + reg;
		dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
	}

	if (db_size) {
		*db_size = ndev->reg->db_size;
		dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
	}

	return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
			       void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
				void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	ndev->reg->db_iowrite(db_bits, mmio);

	return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				   void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask |= db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				     void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask &= ~db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

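/*
 * Compute the doorbell bits serviced by one interrupt vector. Each vector
 * owns a contiguous run of db_vec_shift bits; e.g. with a shift of 4,
 * vector 1 covers doorbell bits 4..7 (mask 0xf0).
 */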
static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
	u64 shift, mask;

	shift = ndev->db_vec_shift;
	mask = BIT_ULL(shift) - 1;

	return mask << (shift * db_vector);
}

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
				 phys_addr_t *spad_addr, phys_addr_t reg_addr,
				 unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	if (spad_addr) {
		*spad_addr = reg_addr + reg + (idx << 2);
		dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
	}

	return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
				 void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
				  void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	iowrite32(val, mmio + (idx << 2));

	return 0;
}

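/*
 * Common interrupt bottom half. On hardware with the bad-vector-32 erratum,
 * link status events can arrive on vector 31, so the link mask is folded
 * into that vector's doorbell mask before dispatching events.
 */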
static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
	u64 vec_mask;

	vec_mask = ndev_vec_mask(ndev, vec);

	if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
		vec_mask |= ndev->db_link_mask;

	dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);

	ndev->last_ts = jiffies;

	if (vec_mask & ndev->db_link_mask) {
		if (ndev->reg->poll_link(ndev))
			ntb_link_event(&ndev->ntb);
	}

	if (vec_mask & ndev->db_valid_mask)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct intel_ntb_vec *nvec = dev;

	dev_dbg(ndev_dev(nvec->ndev), "irq: %d  nvec->num: %d\n",
		irq, nvec->num);

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct intel_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

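/*
 * Interrupt setup tries MSI-X first, then falls back to MSI, then to legacy
 * INTx. With MSI-X, msix_shift slices the doorbell mask across the vectors;
 * with a single MSI or INTx vector, total_shift routes every doorbell bit
 * to the one handler.
 */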
static int ndev_init_isr(struct intel_ntb_dev *ndev,
			 int msix_min, int msix_max,
			 int msix_shift, int total_shift)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev_pdev(ndev);

	node = dev_to_node(&pdev->dev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	/* Try to set up msix irq */

	ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(ndev_dev(ndev), "Using %d msix interrupts\n", msix_count);
	ndev->db_vec_count = msix_count;
	ndev->db_vec_shift = msix_shift;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */

	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */

	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int i;

	pdev = ndev_pdev(ndev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	if (ndev->msix) {
		i = ndev->db_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
	}
}

static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
				      size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev))
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);

	if (ntb_topo_is_b2b(ndev->ntb.topo)) {
		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Outgoing B2B XLAT:\n");

		u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);

		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Secondary BAR:\n");

		u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR0 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2 -\t\t%#018llx\n", u.v64);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Statistics:\n");

	u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Upstream Memory Miss -\t%u\n", u.v16);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Hardware Errors:\n");

	if (!pci_read_config_word(ndev->ntb.pdev,
				  SKX_DEVSTS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "DEVSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_word(ndev->ntb.pdev,
				  SKX_LINK_STATUS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "LNKSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   SKX_UNCERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "UNCERRSTS -\t\t%#06x\n", u.v32);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   SKX_CORERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "CORERRSTS -\t\t%#06x\n", u.v32);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
				     size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	struct pci_dev *pdev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; u8 v8; } u;

	ndev = filp->private_data;
	pdev = ndev_pdev(ndev);
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	if (ndev->b2b_idx != UINT_MAX) {
		off += scnprintf(buf + off, buf_size - off,
				 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
		off += scnprintf(buf + off, buf_size - off,
				 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "BAR4 Split -\t\t%s\n",
			 ndev->bar4_split ? "yes" : "no");

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Window Size:\n");

	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
	off += scnprintf(buf + off, buf_size - off,
			 "PBAR23SZ %hhu\n", u.v8);
	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR45SZ %hhu\n", u.v8);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR4SZ %hhu\n", u.v8);
		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR5SZ %hhu\n", u.v8);
	}

	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
	off += scnprintf(buf + off, buf_size - off,
			 "SBAR23SZ %hhu\n", u.v8);
	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR45SZ %hhu\n", u.v8);
	} else {
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR4SZ %hhu\n", u.v8);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR5SZ %hhu\n", u.v8);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	if (ndev->bar4_split) {
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT4 -\t\t\t%#06x\n", u.v32);

		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT5 -\t\t\t%#06x\n", u.v32);
	} else {
		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT45 -\t\t%#018llx\n", u.v64);
	}

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	if (ndev->bar4_split) {
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT4 -\t\t\t%#06x\n", u.v32);
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT5 -\t\t\t%#06x\n", u.v32);
	} else {
		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT45 -\t\t\t%#018llx\n", u.v64);
	}

	if (pdev_is_xeon(pdev)) {
		if (ntb_topo_is_b2b(ndev->ntb.topo)) {
			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Outgoing B2B XLAT:\n");

			u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT4 -\t\t%#06x\n",
						 u.v32);
				u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT5 -\t\t%#06x\n",
						 u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT45 -\t\t%#018llx\n",
						 u.v64);
			}

			u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT4 -\t\t%#06x\n",
						 u.v32);
				u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT5 -\t\t%#06x\n",
						 u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT45 -\t\t%#018llx\n",
						 u.v64);
			}

			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Secondary BAR:\n");

			u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR01 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR4 -\t\t\t%#06x\n", u.v32);
				u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR5 -\t\t\t%#06x\n", u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR45 -\t\t%#018llx\n",
						 u.v64);
			}
		}

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Statistics:\n");

		u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "Upstream Memory Miss -\t%u\n", u.v16);

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Hardware Errors:\n");

		if (!pci_read_config_word(pdev,
					  XEON_DEVSTS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "DEVSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_word(pdev,
					  XEON_LINK_STATUS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "LNKSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_dword(pdev,
					   XEON_UNCERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "UNCERRSTS -\t\t%#06x\n", u.v32);

		if (!pci_read_config_dword(pdev,
					   XEON_CORERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "CORERRSTS -\t\t%#06x\n", u.v32);
	}

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev = filp->private_data;

	if (pdev_is_xeon(ndev->ntb.pdev) ||
	    pdev_is_atom(ndev->ntb.pdev))
		return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
	else if (pdev_is_skx_xeon(ndev->ntb.pdev))
		return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);

	return -ENXIO;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(ndev_name(ndev), debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &intel_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->mw_count;
}

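/*
 * A memory window wholly consumed for peer access (b2b_off == 0, i.e. not
 * shared) is hidden from clients: indexes at or above b2b_idx are shifted
 * up by one before being mapped to a BAR.
 */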
static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
				  phys_addr_t *base,
				  resource_size_t *size,
				  resource_size_t *align,
				  resource_size_t *align_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar) +
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar) -
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (align)
		*align = pci_resource_len(ndev->ntb.pdev, bar);

	if (align_size)
		*align_size = 1;

	return 0;
}

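/*
 * Program the incoming translation for a memory window: write the xlat and
 * limit registers and read each back to verify, unwinding the setup on a
 * mismatch. Split-BAR windows are 32-bit, so the whole range must sit
 * below 4G.
 */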
static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
				  dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long base_reg, xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

	if (bar < 4 || !ndev->bar4_split) {
		base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite64(addr, mmio + xlat_reg);
		reg_val = ioread64(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite64(limit, mmio + limit_reg);
		reg_val = ioread64(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}
	} else {
		/* split bar addr range must all be 32 bit */
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite32(addr, mmio + xlat_reg);
		reg_val = ioread32(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite32(limit, mmio + limit_reg);
		reg_val = ioread32(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite32(base, mmio + limit_reg);
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}

static int intel_ntb_link_is_up(struct ntb_dev *ntb,
				enum ntb_speed *speed,
				enum ntb_width *width)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (ndev->reg->link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
		return 1;
	} else {
		/* TODO MAYBE: is it possible to observe the link speed and
		 * width while link is training? */
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;
		return 0;
	}
}

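/*
 * "Enabling" the link unlocks the NTB configuration and turns on snooping
 * for BAR 2/4 (and 5 when split) traffic; training itself is autonomous,
 * so the max_speed/max_width hints are accepted but ignored.
 */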
static int intel_ntb_link_enable(struct ntb_dev *ntb,
				 enum ntb_speed max_speed,
				 enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev),
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);
	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	if (ndev->bar4_split)
		ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev), "Disabling link\n");

	/* Bring NTB link down */
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
	if (ndev->bar4_split)
		ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
	ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->db_vec_count;
}

static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector > ndev->db_vec_count)
		return 0;

	return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_bell);
}

static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_set_mask(ndev, db_bits,
				ndev->self_mmio +
				ndev->self_reg->db_mask);
}

static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_clear_mask(ndev, db_bits,
				  ndev->self_mmio +
				  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
				  phys_addr_t *db_addr,
				  resource_size_t *db_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
			    ndev->peer_reg->db_bell);
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->peer_mmio +
			     ndev->peer_reg->db_bell);
}

static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

static int intel_ntb_spad_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->spad_count;
}

static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->self_mmio +
			      ndev->self_reg->spad);
}

static int intel_ntb_spad_write(struct ntb_dev *ntb,
				int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->self_mmio +
			       ndev->self_reg->spad);
}

static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *spad_addr)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
			      ndev->peer_reg->spad);
}

static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->peer_mmio +
			      ndev->peer_reg->spad);
}

static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
				     int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->peer_mmio +
			       ndev->peer_reg->spad);
}

/* ATOM */

static u64 atom_db_ioread(void __iomem *mmio)
{
	return ioread64(mmio);
}

static void atom_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite64(bits, mmio);
}

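/*
 * Detect link changes by comparing the control register with the cached
 * value; only on a change are the cached control and link status refreshed
 * and a nonzero "changed" result returned to the caller.
 */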
Dave Jiang2f887b92015-05-20 12:55:47 -04001409static int atom_poll_link(struct intel_ntb_dev *ndev)
Allen Hubbee26a5842015-04-09 10:33:20 -04001410{
1411 u32 ntb_ctl;
1412
Dave Jiang2f887b92015-05-20 12:55:47 -04001413 ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001414
1415 if (ntb_ctl == ndev->ntb_ctl)
1416 return 0;
1417
1418 ndev->ntb_ctl = ntb_ctl;
1419
Dave Jiang2f887b92015-05-20 12:55:47 -04001420 ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001421
1422 return 1;
1423}
1424
Dave Jiang2f887b92015-05-20 12:55:47 -04001425static int atom_link_is_up(struct intel_ntb_dev *ndev)
Allen Hubbee26a5842015-04-09 10:33:20 -04001426{
Dave Jiang2f887b92015-05-20 12:55:47 -04001427 return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
Allen Hubbee26a5842015-04-09 10:33:20 -04001428}
1429
Dave Jiang2f887b92015-05-20 12:55:47 -04001430static int atom_link_is_err(struct intel_ntb_dev *ndev)
Allen Hubbee26a5842015-04-09 10:33:20 -04001431{
Dave Jiang2f887b92015-05-20 12:55:47 -04001432 if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
1433 & ATOM_LTSSMSTATEJMP_FORCEDETECT)
Allen Hubbee26a5842015-04-09 10:33:20 -04001434 return 1;
1435
Dave Jiang2f887b92015-05-20 12:55:47 -04001436 if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
1437 & ATOM_IBIST_ERR_OFLOW)
Allen Hubbee26a5842015-04-09 10:33:20 -04001438 return 1;
1439
1440 return 0;
1441}
1442
Dave Jiang2f887b92015-05-20 12:55:47 -04001443static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
Allen Hubbee26a5842015-04-09 10:33:20 -04001444{
Dave Jiang2f887b92015-05-20 12:55:47 -04001445 switch (ppd & ATOM_PPD_TOPO_MASK) {
1446 case ATOM_PPD_TOPO_B2B_USD:
Allen Hubbee26a5842015-04-09 10:33:20 -04001447 dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
1448 return NTB_TOPO_B2B_USD;
1449
Dave Jiang2f887b92015-05-20 12:55:47 -04001450 case ATOM_PPD_TOPO_B2B_DSD:
Allen Hubbee26a5842015-04-09 10:33:20 -04001451 dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
1452 return NTB_TOPO_B2B_DSD;
1453
Dave Jiang2f887b92015-05-20 12:55:47 -04001454 case ATOM_PPD_TOPO_PRI_USD:
1455 case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
1456 case ATOM_PPD_TOPO_SEC_USD:
1457 case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
Allen Hubbee26a5842015-04-09 10:33:20 -04001458 dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
1459 return NTB_TOPO_NONE;
1460 }
1461
1462 dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
1463 return NTB_TOPO_NONE;
1464}
1465
Dave Jiang2f887b92015-05-20 12:55:47 -04001466static void atom_link_hb(struct work_struct *work)
Allen Hubbee26a5842015-04-09 10:33:20 -04001467{
1468 struct intel_ntb_dev *ndev = hb_ndev(work);
1469 unsigned long poll_ts;
1470 void __iomem *mmio;
1471 u32 status32;
1472
Dave Jiang2f887b92015-05-20 12:55:47 -04001473 poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
Allen Hubbee26a5842015-04-09 10:33:20 -04001474
1475 /* Delay polling the link status if an interrupt was received,
1476 * unless the cached link status says the link is down.
1477 */
Dave Jiang2f887b92015-05-20 12:55:47 -04001478 if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
Allen Hubbee26a5842015-04-09 10:33:20 -04001479 schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
1480 return;
1481 }
1482
Dave Jiang2f887b92015-05-20 12:55:47 -04001483 if (atom_poll_link(ndev))
		ntb_link_event(&ndev->ntb);

	if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
		schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
		return;
	}

	/* Link is down with error: recover the link! */

	mmio = ndev->self_mmio;

	/* Driver resets the NTB ModPhy lanes - magic! */
	iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
	iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
	iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
	iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);

	/* Driver waits 100ms to allow the NTB ModPhy to settle */
	msleep(100);

	/* Clear AER Errors, write to clear */
	status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
	dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
	status32 &= PCI_ERR_COR_REP_ROLL;
	iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);

	/* Clear unexpected electrical idle event in LTSSM, write to clear */
	status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
	dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
	status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
	iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);

	/* Clear DeSkew Buffer error, write to clear */
	status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
	dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
	status32 |= ATOM_DESKEWSTS_DBERR;
	iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);

	/* Clear IBIST receive error overflow, write to clear */
	status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
	dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
	status32 &= ATOM_IBIST_ERR_OFLOW;
	iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);

	/* Release the NTB state machine to allow the link to retrain */
	status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
	dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
	status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
	iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);

	/* There is a potential race between the 2 NTB devices recovering at
	 * the same time.  If the times are the same, the link will not
	 * recover and the driver will be stuck in this loop forever.  Add a
	 * random interval to the recovery time to prevent this race.
	 */
	schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
			      + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
}

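/*
 * Set up the Atom interrupt handling and start the link heartbeat.
 * Atom has no link status interrupt, so the heartbeat work item above
 * both polls the link and drives the error recovery sequence.
 */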
static int atom_init_isr(struct intel_ntb_dev *ndev)
{
	int rc;

	rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
			   ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
	if (rc)
		return rc;

	/* Atom has no link status interrupt; poll the link on this platform */
	ndev->last_ts = jiffies;
	INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
	schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);

	return 0;
}

static void atom_deinit_isr(struct intel_ntb_dev *ndev)
{
	cancel_delayed_work_sync(&ndev->hb_timer);
	ndev_deinit_isr(ndev);
}

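/*
 * Apply the Atom resource counts and register maps.  Only back-to-back
 * topologies are supported here.  Note the doorbell mask arithmetic: a
 * db_count of N yields db_valid_mask == BIT_ULL(N) - 1, i.e. the low N
 * doorbell bits.
 */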
static int atom_init_ntb(struct intel_ntb_dev *ndev)
{
	ndev->mw_count = ATOM_MW_COUNT;
	ndev->spad_count = ATOM_SPAD_COUNT;
	ndev->db_count = ATOM_DB_COUNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &atom_pri_reg;
		ndev->peer_reg = &atom_b2b_reg;
		ndev->xlat_reg = &atom_sec_xlat;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + ATOM_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	return 0;
}

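/*
 * Atom device init: read the PPD to discover the topology, set up the
 * NTB resources and ISR, then (unless we are the secondary side) poke
 * ATOM_PPD_INIT_LINK to kick off PCI-E link training.
 */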
static int atom_init_dev(struct intel_ntb_dev *ndev)
{
	u32 ppd;
	int rc;

	rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	rc = atom_init_ntb(ndev);
	if (rc)
		return rc;

	rc = atom_init_isr(ndev);
	if (rc)
		return rc;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		/* Initiate PCI-E link training */
		rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
					    ppd | ATOM_PPD_INIT_LINK);
		if (rc)
			return rc;
	}

	return 0;
}

static void atom_deinit_dev(struct intel_ntb_dev *ndev)
{
	atom_deinit_isr(ndev);
}

/* Skylake Xeon NTB */

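/*
 * Link polling on Skylake: clear the link doorbell, then compare the
 * link status word against the cached value.  Returns nonzero only when
 * the status actually changed, so the caller can raise a link event.
 */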
static int skx_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_clear);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  SKX_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

static u64 skx_db_ioread(void __iomem *mmio)
{
	return ioread64(mmio);
}

static void skx_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite64(bits, mmio);
}

static int skx_init_isr(struct intel_ntb_dev *ndev)
{
	int i;

	/*
	 * The MSIX vectors and the interrupt status bits are not lined up
	 * on Skylake.  By default the link status bit is bit 32, but it is
	 * routed to MSIX vector 0 by default.  Fix this up so they line up:
	 * the vectors at reset are 1-32,0, and we reprogram them to 0-32.
	 */

	for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++)
		iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i);

	/* move link status down one as workaround */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
		iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2,
			 ndev->self_mmio + SKX_INTVEC_OFFSET +
			 (SKX_DB_MSIX_VECTOR_COUNT - 1));
	}

	return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT,
			     SKX_DB_MSIX_VECTOR_COUNT,
			     SKX_DB_MSIX_VECTOR_SHIFT,
			     SKX_DB_TOTAL_SHIFT);
}

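/*
 * Configure the Skylake b2b memory window.  Depending on b2b_mw_share
 * and the BAR size, b2b traffic either shares the first half of the BAR
 * with a memory window or consumes the whole BAR (costing one window).
 * Incoming limits are then set equal to the bases, leaving zero-length
 * windows until a translation is programmed.
 */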
static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
			    const struct intel_b2b_addr *addr,
			    const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev_pdev(ndev);
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx == UINT_MAX) {
		dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
			dev_dbg(ndev_dev(ndev),
				"b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (bar_size >= XEON_B2B_MIN_SIZE) {
			dev_dbg(ndev_dev(ndev),
				"b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(ndev_dev(ndev),
				"b2b bar size is too small\n");
			return -EIO;
		}
	}

	/*
	 * Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.
	 */
	pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "IMBAR1SZ %#x\n", bar_sz);
	if (b2b_bar == 1) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}

	pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "EMBAR1SZ %#x\n", bar_sz);

	pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "IMBAR2SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}

	pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "EMBAR2SZ %#x\n", bar_sz);

	/* SBAR01 hit by first part of the b2b bar; the value chosen here is
	 * overwritten below, but the if/else chain still validates b2b_bar.
	 */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 1)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 2)
		bar_addr = addr->bar4_addr64;
	else
		return -EIO;

	/* setup incoming bar limits == base addrs (zero length windows) */
	bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
	bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
	dev_dbg(ndev_dev(ndev), "IMBAR1XLMT %#018llx\n", bar_addr);

	bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
	bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
	dev_dbg(ndev_dev(ndev), "IMBAR2XLMT %#018llx\n", bar_addr);

	/* zero incoming translation addrs */
	iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
	iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET);

	ndev->peer_mmio = ndev->self_mmio;

	return 0;
}

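/*
 * Skylake NTB resource setup.  Note the doorbell mask write at the end:
 * all valid doorbell bits start out masked; clients typically unmask
 * the bits they use via the db_clear_mask op.
 */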
static int skx_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;

	ndev->mw_count = XEON_MW_COUNT;
	ndev->spad_count = SKX_SPAD_COUNT;
	ndev->db_count = SKX_DB_COUNT;
	ndev->db_link_mask = SKX_DB_LINK_BIT;

	/* DB fixup: with the vector 32 errata, the link status is remapped
	 * onto vector 31, so treat doorbell bit 31 as a link bit too.
	 */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
		ndev->db_link_mask |= BIT_ULL(31);

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &skx_pri_reg;
		ndev->peer_reg = &skx_b2b_reg;
		ndev->xlat_reg = &skx_sec_xlat;

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = skx_setup_b2b_mw(ndev,
					      &xeon_b2b_dsd_addr,
					      &xeon_b2b_usd_addr);
		} else {
			rc = skx_setup_b2b_mw(ndev,
					      &xeon_b2b_usd_addr,
					      &xeon_b2b_dsd_addr);
		}

		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + SKX_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

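/*
 * Skylake device init: the topology is encoded in the same PPD register
 * as on older Xeons, and all current Skylake parts get the
 * NTB_HWERR_MSIX_VECTOR32_BAD workaround flag.
 */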
static int skx_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc;

	pdev = ndev_pdev(ndev);

	ndev->reg = &skx_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (pdev_is_skx_xeon(pdev))
		ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;

	rc = skx_init_ntb(ndev);
	if (rc)
		return rc;

	return skx_init_isr(ndev);
}

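/*
 * Gen3 link enable: speed and width hints are accepted but ignored (the
 * hardware trains automatically); the control register is simply
 * unlocked, enabled, and set to snoop on both the BAR2 and BAR4 paths.
 */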
static int intel_ntb3_link_enable(struct ntb_dev *ntb,
				  enum ntb_speed max_speed,
				  enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(ndev_dev(ndev),
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);

	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}
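
/*
 * Program an inbound memory window: write the DMA address into the
 * IMBARxXBASE translation register and bound it with IMBARxXLMT, then
 * mirror the limit on the EP side.  Illustrative numbers (not from real
 * hardware): with a 64 KiB BAR at base 0xd0000000, mapping a 16 KiB
 * buffer at DMA address 0x10000 would set XBASE = 0x10000 and
 * XLMT = 0xd0004000, so only the first 16 KiB of the window is exposed.
 */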
static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
				   dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
	base = pci_resource_start(ndev->ntb.pdev, bar);

	/* Set the limit if supported and size is not mw_size */
	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the translation address */
	iowrite64(addr, mmio + xlat_reg);
	reg_val = ioread64(mmio + xlat_reg);
	if (reg_val != addr) {
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(ndev_dev(ndev), "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(ndev_dev(ndev), "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);

	/* setup the EP */
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
	base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx));
	base &= ~0xf;

	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(ndev_dev(ndev), "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);

	return 0;
}

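/*
 * Ring peer doorbells one bit at a time.  Each iteration takes the
 * lowest set bit with __ffs() and clears it with db_bits &= db_bits - 1;
 * e.g. db_bits == 0x0a rings doorbell registers 1 and 3, in that order.
 */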
static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bit;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	while (db_bits) {
		bit = __ffs(db_bits);
		iowrite32(1, ndev->peer_mmio +
			  ndev->peer_reg->db_bell + (bit * 4));
		db_bits &= db_bits - 1;
	}

	return 0;
}

static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_clear);
}

static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_clear);
}

/* XEON */

static u64 xeon_db_ioread(void __iomem *mmio)
{
	return (u64)ioread16(mmio);
}

static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite16((u16)bits, mmio);
}

static int xeon_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_bell);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  XEON_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

static int xeon_link_is_up(struct intel_ntb_dev *ndev)
{
	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return 1;

	return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}

static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
	switch (ppd & XEON_PPD_TOPO_MASK) {
	case XEON_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;

	case XEON_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;

	case XEON_PPD_TOPO_PRI_USD:
	case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
		return NTB_TOPO_PRI;

	case XEON_PPD_TOPO_SEC_USD:
	case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		return NTB_TOPO_SEC;
	}

	return NTB_TOPO_NONE;
}

static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
	if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
		dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
		return 1;
	}
	return 0;
}

static int xeon_init_isr(struct intel_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_SHIFT,
			     XEON_DB_TOTAL_SHIFT);
}

static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
{
	ndev_deinit_isr(ndev);
}

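/*
 * Configure the Xeon b2b memory window.  The layout mirrors the Skylake
 * version above, with two extra wrinkles: BAR4 may be split into two
 * 32-bit BARs (4 and 5), and the peer's b2b translation offset must be
 * written as two 32-bit halves.
 */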
static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev_pdev(ndev);
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx == UINT_MAX) {
		dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
			dev_dbg(ndev_dev(ndev),
				"b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (XEON_B2B_MIN_SIZE <= bar_size) {
			dev_dbg(ndev_dev(ndev),
				"b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(ndev_dev(ndev),
				"b2b bar size is too small\n");
			return -EIO;
		}
	}

	/* Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.
	 *
	 * Note: code for each specific bar size register, because the register
	 * offsets are not in a consistent order (bar5sz comes after ppd, odd).
	 */
	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}
	pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);

	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);

		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
		if (b2b_bar == 5) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
	}

	/* SBAR01 hit by first part of the b2b bar */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = addr->bar5_addr32;
	else
		return -EIO;

	dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
	iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);

	/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
	 * The b2b bar is either disabled above, or configured half-size, and
	 * it starts at the PBAR xlat + offset.
	 */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
	}

	/* setup incoming bar limits == base addrs (zero length windows) */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr);
	}

	/* zero incoming translation addrs */
	iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
	}

	/* zero outgoing translation limits (whole bar size windows) */
	iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
	}

	/* set outgoing translation offsets */
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
	dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
	}

	/* set the translation offset for b2b registers */
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

	/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
	dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
		/* map peer ntb mmio config space registers */
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    XEON_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;

		ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
	}

	return 0;
}

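/*
 * Xeon NTB resource setup.  The SDOORBELL_LOCKUP errata forces b2b
 * configurations to reach the peer's registers through a memory window
 * (peer_reg points back at the primary register set), and the
 * B2BDOORBELL_BIT14 errata costs one doorbell bit.
 */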
static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;
	u32 ntb_ctl;

	if (ndev->bar4_split)
		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
	else
		ndev->mw_count = XEON_MW_COUNT;

	ndev->spad_count = XEON_SPAD_COUNT;
	ndev->db_count = XEON_DB_COUNT;
	ndev->db_link_mask = XEON_DB_LINK_BIT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
			return -EINVAL;
		}

		/* enable link to allow secondary side device to appear */
		ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
		ntb_ctl &= ~NTB_CTL_DISABLE;
		iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_sec_reg;
		ndev->xlat_reg = &xeon_sec_xlat;
		break;

	case NTB_TOPO_SEC:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
			return -EINVAL;
		}
		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_sec_reg;
		ndev->peer_reg = &xeon_pri_reg;
		ndev->xlat_reg = &xeon_pri_xlat;
		break;

	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_b2b_reg;
		ndev->xlat_reg = &xeon_sec_xlat;

		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			ndev->peer_reg = &xeon_pri_reg;

			if (b2b_mw_idx < 0)
				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
			else
				ndev->b2b_idx = b2b_mw_idx;

			if (ndev->b2b_idx >= ndev->mw_count) {
				dev_dbg(ndev_dev(ndev),
					"b2b_mw_idx %d invalid for mw_count %u\n",
					b2b_mw_idx, ndev->mw_count);
				return -EINVAL;
			}

			dev_dbg(ndev_dev(ndev),
				"setting up b2b mw idx %d means %d\n",
				b2b_mw_idx, ndev->b2b_idx);

		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
			dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
			ndev->db_count -= 1;
		}

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_dsd_addr,
					       &xeon_b2b_usd_addr);
		} else {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_usd_addr,
					       &xeon_b2b_dsd_addr);
		}
		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + XEON_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

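/*
 * Xeon device init: collect the errata flags for this device ID, read
 * the PPD for topology and split-BAR mode, then set up resources and
 * interrupts.
 */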
static int xeon_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc, mem;

	pdev = ndev_pdev(ndev);

	switch (pdev->device) {
	/* There is a Xeon hardware errata related to writes to SDOORBELL or
	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
	 * which may hang the system.  To work around this, use the second
	 * memory window to access the interrupt and scratch pad registers
	 * on the remote system.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* There is a hardware errata related to accessing any register in
	 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* HW Errata on bit 14 of b2bdoorbell register.  Writes will not be
	 * mirrored to the remote system.  Shrink the number of bits by one,
	 * since bit 14 is the last bit.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
		break;
	}

	ndev->reg = &xeon_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
		dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
			ppd, ndev->bar4_split);
	} else {
		/* This is a way for transparent BAR to figure out if we are
		 * doing split BAR or not.  There is no way for the hw on the
		 * transparent side to know and set the PPD.
		 */
		mem = pci_select_bars(pdev, IORESOURCE_MEM);
		ndev->bar4_split = hweight32(mem) ==
			HSX_SPLIT_BAR_MW_COUNT + 1;
		dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
			mem, ndev->bar4_split);
	}

	rc = xeon_init_ntb(ndev);
	if (rc)
		return rc;

	return xeon_init_isr(ndev);
}

static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
{
	xeon_deinit_isr(ndev);
}

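/*
 * Common PCI bring-up: enable the device, claim its regions, and try a
 * 64-bit DMA mask before falling back to 32-bit (with a warning, since
 * highmem buffers then cannot be used for DMA).  BAR 0 holds the NTB
 * register space and is always mapped as self_mmio.
 */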
static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_mmio;
	}
	ndev->peer_mmio = ndev->self_mmio;
	ndev->peer_addr = pci_resource_start(pdev, 0);

	return 0;

err_mmio:
err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev_pdev(ndev);

	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
		pci_iounmap(pdev, ndev->peer_mmio);
	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &intel_ntb_ops;

	ndev->b2b_off = 0;
	ndev->b2b_idx = UINT_MAX;

	ndev->bar4_split = 0;

	ndev->mw_count = 0;
	ndev->spad_count = 0;
	ndev->db_count = 0;
	ndev->db_vec_count = 0;
	ndev->db_vec_shift = 0;

	ndev->ntb_ctl = 0;
	ndev->lnk_sta = 0;

	ndev->db_valid_mask = 0;
	ndev->db_link_mask = 0;
	ndev->db_mask = 0;

	spin_lock_init(&ndev->db_mask_lock);
}

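/*
 * Probe dispatches on the device ID: Atom, classic Xeon, or Skylake
 * Xeon (which swaps in the gen3 ops table).  Allocation is NUMA-aware,
 * placing the device structure on the node of the PCI device.
 */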
static int intel_ntb_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct intel_ntb_dev *ndev;
	int rc, node;

	node = dev_to_node(&pdev->dev);

	if (pdev_is_atom(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = atom_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_xeon(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = xeon_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_skx_xeon(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);
		ndev->ntb.ops = &intel_ntb3_ops;

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = skx_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else {
		rc = -EINVAL;
		goto err_ndev;
	}

	ndev_reset_unsafe_flags(ndev);

	ndev->reg->poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	if (pdev_is_atom(pdev))
		atom_deinit_dev(ndev);
	else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
		xeon_deinit_dev(ndev);
err_init_dev:
	intel_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	if (pdev_is_atom(pdev))
		atom_deinit_dev(ndev);
	else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
		xeon_deinit_dev(ndev);
	intel_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static const struct intel_ntb_reg atom_reg = {
	.poll_link		= atom_poll_link,
	.link_is_up		= atom_link_is_up,
	.db_ioread		= atom_db_ioread,
	.db_iowrite		= atom_db_iowrite,
	.db_size		= sizeof(u64),
	.ntb_ctl		= ATOM_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4},
};

static const struct intel_ntb_alt_reg atom_pri_reg = {
	.db_bell		= ATOM_PDOORBELL_OFFSET,
	.db_mask		= ATOM_PDBMSK_OFFSET,
	.spad			= ATOM_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg atom_b2b_reg = {
	.db_bell		= ATOM_B2B_DOORBELL_OFFSET,
	.spad			= ATOM_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg atom_sec_xlat = {
	/* FIXME : .bar0_base	= ATOM_SBAR0BASE_OFFSET, */
	/* FIXME : .bar2_limit	= ATOM_SBAR2LMT_OFFSET, */
	.bar2_xlat		= ATOM_SBAR2XLAT_OFFSET,
};

static const struct intel_ntb_reg xeon_reg = {
	.poll_link		= xeon_poll_link,
	.link_is_up		= xeon_link_is_up,
	.db_ioread		= xeon_db_ioread,
	.db_iowrite		= xeon_db_iowrite,
	.db_size		= sizeof(u32),
	.ntb_ctl		= XEON_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4, 5},
};

static const struct intel_ntb_alt_reg xeon_pri_reg = {
	.db_bell		= XEON_PDOORBELL_OFFSET,
	.db_mask		= XEON_PDBMSK_OFFSET,
	.spad			= XEON_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg xeon_sec_reg = {
	.db_bell		= XEON_SDOORBELL_OFFSET,
	.db_mask		= XEON_SDBMSK_OFFSET,
	/* second half of the scratchpads */
	.spad			= XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
};

static const struct intel_ntb_alt_reg xeon_b2b_reg = {
	.db_bell		= XEON_B2B_DOORBELL_OFFSET,
	.spad			= XEON_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
	/* Note: no primary .bar0_base visible to the secondary side.
	 *
	 * The secondary side cannot get the base address stored in primary
	 * bars.  The base address is necessary to set the limit register to
	 * any value other than zero, or unlimited.
	 *
	 * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
	 * window by setting the limit equal to base, nor can it limit the size
	 * of the memory window by setting the limit to base + size.
	 */
	.bar2_limit		= XEON_PBAR23LMT_OFFSET,
	.bar2_xlat		= XEON_PBAR23XLAT_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base		= XEON_SBAR0BASE_OFFSET,
	.bar2_limit		= XEON_SBAR23LMT_OFFSET,
	.bar2_xlat		= XEON_SBAR23XLAT_OFFSET,
};

static struct intel_b2b_addr xeon_b2b_usd_addr = {
	.bar2_addr64		= XEON_B2B_BAR2_ADDR64,
	.bar4_addr64		= XEON_B2B_BAR4_ADDR64,
	.bar4_addr32		= XEON_B2B_BAR4_ADDR32,
	.bar5_addr32		= XEON_B2B_BAR5_ADDR32,
};

static struct intel_b2b_addr xeon_b2b_dsd_addr = {
	.bar2_addr64		= XEON_B2B_BAR2_ADDR64,
	.bar4_addr64		= XEON_B2B_BAR4_ADDR64,
	.bar4_addr32		= XEON_B2B_BAR4_ADDR32,
	.bar5_addr32		= XEON_B2B_BAR5_ADDR32,
};

static const struct intel_ntb_reg skx_reg = {
	.poll_link		= skx_poll_link,
	.link_is_up		= xeon_link_is_up,
	.db_ioread		= skx_db_ioread,
	.db_iowrite		= skx_db_iowrite,
	.db_size		= sizeof(u64),
	.ntb_ctl		= SKX_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4},
};

static const struct intel_ntb_alt_reg skx_pri_reg = {
	.db_bell		= SKX_EM_DOORBELL_OFFSET,
	.db_clear		= SKX_IM_INT_STATUS_OFFSET,
	.db_mask		= SKX_IM_INT_DISABLE_OFFSET,
	.spad			= SKX_IM_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg skx_b2b_reg = {
	.db_bell		= SKX_IM_DOORBELL_OFFSET,
	.db_clear		= SKX_EM_INT_STATUS_OFFSET,
	.db_mask		= SKX_EM_INT_DISABLE_OFFSET,
	.spad			= SKX_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg skx_sec_xlat = {
	/* .bar0_base		= SKX_EMBAR0_OFFSET, */
	.bar2_limit		= SKX_IMBAR1XLMT_OFFSET,
	.bar2_xlat		= SKX_IMBAR1XBASE_OFFSET,
};

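/*
 * For reference, a minimal NTB client driving these ops through the
 * common NTB core might look like the sketch below.  This is an
 * illustrative, hypothetical fragment (error handling and the remove
 * callback are omitted); see the ntb_transport and ntb test clients for
 * complete examples.
 *
 *	static int my_client_probe(struct ntb_client *client,
 *				   struct ntb_dev *ntb)
 *	{
 *		return ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
 *	}
 *
 *	static struct ntb_client my_client = {
 *		.ops = { .probe = my_client_probe, ... },
 *	};
 *
 * and ntb_register_client(&my_client) from module init.
 */
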
/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_range		= intel_ntb_mw_get_range,
	.mw_set_trans		= intel_ntb_mw_set_trans,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb_link_enable,
	.link_disable		= intel_ntb_link_disable,
	.db_is_unsafe		= intel_ntb_db_is_unsafe,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb_db_read,
	.db_clear		= intel_ntb_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb_peer_db_addr,
	.peer_db_set		= intel_ntb_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};

/* operations for the third generation (Skylake Xeon) ntb */
static const struct ntb_dev_ops intel_ntb3_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_range		= intel_ntb_mw_get_range,
	.mw_set_trans		= intel_ntb3_mw_set_trans,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb3_link_enable,
	.link_disable		= intel_ntb_link_disable,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb3_db_read,
	.db_clear		= intel_ntb3_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb_peer_db_addr,
	.peer_db_set		= intel_ntb3_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};

static const struct file_operations intel_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

static const struct pci_device_id intel_ntb_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);

static struct pci_driver intel_ntb_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = intel_ntb_pci_tbl,
	.probe = intel_ntb_pci_probe,
	.remove = intel_ntb_pci_remove,
};

static int __init intel_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);

static void __exit intel_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&intel_ntb_pci_driver);

	debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);
Jon Mason1517a3f2013-07-30 15:58:49 -07003011