/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME	"ntb_hw_intel"
#define NTB_DESC	"Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

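/*
 * Register blocks lay out per-BAR registers as arrays of 32-bit slots
 * indexed by BAR number.  bar0_off() computes the register offset for a
 * BAR relative to a base offset; bar2_off() does the same for arrays
 * that start at BAR 2, so bar2_off(base, 2) == base and
 * bar2_off(base, 4) == base + 8.
 */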
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

static const struct intel_ntb_reg atom_reg;
static const struct intel_ntb_alt_reg atom_pri_reg;
static const struct intel_ntb_alt_reg atom_sec_reg;
static const struct intel_ntb_alt_reg atom_b2b_reg;
static const struct intel_ntb_xlat_reg atom_pri_xlat;
static const struct intel_ntb_xlat_reg atom_sec_xlat;
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
static const struct intel_ntb_reg skx_reg;
static const struct intel_ntb_alt_reg skx_pri_reg;
static const struct intel_ntb_alt_reg skx_b2b_reg;
static const struct intel_ntb_xlat_reg skx_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;
static const struct ntb_dev_ops intel_ntb3_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

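/*
 * Module parameters for the back-to-back (b2b) topology.  A negative
 * b2b_mw_idx counts back from the last memory window, so the default of
 * -1 uses the last mw to reach the peer.  Illustrative usage (parameter
 * values are examples, not requirements):
 *
 *	modprobe ntb_hw_intel b2b_mw_idx=-1 b2b_mw_share=1
 *
 * Both sides of the link must be configured with the same values.
 */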
static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
		 "value of zero or positive starts from first mw idx, and a "
		 "negative value starts from last mw idx. Both sides MUST "
		 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
		 "ntb so that the peer ntb only occupies the first half of "
		 "the mw, so the second half can still be used as a mw. Both "
		 "sides MUST set the same value here!");

module_param_named(xeon_b2b_usd_bar2_addr64,
		   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
		 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
		   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
		 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
		   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
		 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
		   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
		 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
		   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
		 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
		   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
		 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
		   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
		 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
		   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
		 "XEON B2B DSD split-BAR 5 32-bit address");

static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
static int xeon_init_isr(struct intel_ntb_dev *ndev);

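/*
 * Fallbacks for platforms without a native 64-bit MMIO accessor
 * (readq/writeq): emulate with two 32-bit accesses, low dword first.
 * The two halves are not atomic with respect to each other, which the
 * driver tolerates for the registers it accesses this way.
 */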
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif

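/*
 * Platform detection is by PCI device ID: Atom (BWD), the classic Xeon
 * NTBs (JSF/SNB/IVT/HSX/BDX), and Skylake Xeon are programmed
 * differently, so the driver dispatches on these helpers.
 */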
static inline int pdev_is_atom(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
		return 1;
	}
	return 0;
}

static inline int pdev_is_xeon(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		return 1;
	}
	return 0;
}

static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
		return 1;

	return 0;
}

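/*
 * Some hardware errata (tracked in ndev->hwerr_flags) make doorbell
 * and/or scratchpad access unsafe.  ndev_reset_unsafe_flags() computes
 * which accesses are affected for this topology; a client that knowingly
 * proceeds anyway calls ndev_ignore_unsafe(), which suppresses the
 * pr_warn_once() nag in the accessors below.
 */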
static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
	ndev->unsafe_flags = 0;
	ndev->unsafe_flags_ignore = 0;

	/* Only B2B has a workaround to avoid SDOORBELL */
	if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
		if (!ntb_topo_is_b2b(ndev->ntb.topo))
			ndev->unsafe_flags |= NTB_UNSAFE_DB;

	/* No low level workaround to avoid SB01BASE */
	if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
		ndev->unsafe_flags |= NTB_UNSAFE_DB;
		ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
	}
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
				 unsigned long flag)
{
	return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
				     unsigned long flag)
{
	flag &= ndev->unsafe_flags;
	ndev->unsafe_flags_ignore |= flag;

	return !!flag;
}

static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx >= ndev->mw_count)
		return -EINVAL;
	return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
			       phys_addr_t *db_addr, resource_size_t *db_size,
			       phys_addr_t reg_addr, unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_addr) {
		*db_addr = reg_addr + reg;
		dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
	}

	if (db_size) {
		*db_size = ndev->reg->db_size;
		dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
	}

	return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
			       void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
				void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	ndev->reg->db_iowrite(db_bits, mmio);

	return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				   void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask |= db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				     void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask &= ~db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

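/*
 * Map an interrupt vector to the doorbell bits it services.  Each vector
 * covers a contiguous group of db_vec_shift bits; e.g. with
 * db_vec_shift == 4, vector 2 yields mask 0xf00.
 */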
static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
	u64 shift, mask;

	shift = ndev->db_vec_shift;
	mask = BIT_ULL(shift) - 1;

	return mask << (shift * db_vector);
}

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
				 phys_addr_t *spad_addr, phys_addr_t reg_addr,
				 unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	if (spad_addr) {
		*spad_addr = reg_addr + reg + (idx << 2);
		dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
	}

	return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
				 void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
				  void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	iowrite32(val, mmio + (idx << 2));

	return 0;
}

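/*
 * Common interrupt body: translate the vector into its doorbell bits,
 * fold in the link bit when the MSIX_VECTOR32_BAD erratum applies, then
 * dispatch link and/or doorbell events to the NTB core.
 */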
static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
	u64 vec_mask;

	vec_mask = ndev_vec_mask(ndev, vec);

	if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
		vec_mask |= ndev->db_link_mask;

	dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);

	ndev->last_ts = jiffies;

	if (vec_mask & ndev->db_link_mask) {
		if (ndev->reg->poll_link(ndev))
			ntb_link_event(&ndev->ntb);
	}

	if (vec_mask & ndev->db_valid_mask)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct intel_ntb_vec *nvec = dev;

	dev_dbg(ndev_dev(nvec->ndev), "irq: %d nvec->num: %d\n",
		irq, nvec->num);

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct intel_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

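/*
 * Interrupt setup degrades gracefully: first MSI-X with up to msix_max
 * vectors, then a single MSI, then shared legacy INTx.  db_vec_count and
 * db_vec_shift record which layout won, so ndev_vec_mask() can later
 * demultiplex doorbell bits per vector.
 */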
static int ndev_init_isr(struct intel_ntb_dev *ndev,
			 int msix_min, int msix_max,
			 int msix_shift, int total_shift)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev_pdev(ndev);

	node = dev_to_node(&pdev->dev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	/* Try to set up msix irq */

	ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(ndev_dev(ndev), "Using %d msix interrupts\n", msix_count);
	ndev->db_vec_count = msix_count;
	ndev->db_vec_shift = msix_shift;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */

	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */

	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int i;

	pdev = ndev_pdev(ndev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	if (ndev->msix) {
		i = ndev->db_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
	}
}

static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
				      size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev))
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);

	if (ntb_topo_is_b2b(ndev->ntb.topo)) {
		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Outgoing B2B XLAT:\n");

		u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);

		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Secondary BAR:\n");

		u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR0 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2 -\t\t%#018llx\n", u.v64);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Statistics:\n");

	u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Upstream Memory Miss -\t%u\n", u.v16);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Hardware Errors:\n");

	if (!pci_read_config_word(ndev->ntb.pdev,
				  SKX_DEVSTS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "DEVSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_word(ndev->ntb.pdev,
				  SKX_LINK_STATUS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "LNKSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   SKX_UNCERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "UNCERRSTS -\t\t%#06x\n", u.v32);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   SKX_CORERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "CORERRSTS -\t\t%#06x\n", u.v32);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
				     size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	struct pci_dev *pdev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; u8 v8; } u;

	ndev = filp->private_data;
	pdev = ndev_pdev(ndev);
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	if (ndev->b2b_idx != UINT_MAX) {
		off += scnprintf(buf + off, buf_size - off,
				 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
		off += scnprintf(buf + off, buf_size - off,
				 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "BAR4 Split -\t\t%s\n",
			 ndev->bar4_split ? "yes" : "no");

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Window Size:\n");

	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
	off += scnprintf(buf + off, buf_size - off,
			 "PBAR23SZ %hhu\n", u.v8);
	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR45SZ %hhu\n", u.v8);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR4SZ %hhu\n", u.v8);
		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR5SZ %hhu\n", u.v8);
	}

	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
	off += scnprintf(buf + off, buf_size - off,
			 "SBAR23SZ %hhu\n", u.v8);
	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR45SZ %hhu\n", u.v8);
	} else {
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR4SZ %hhu\n", u.v8);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR5SZ %hhu\n", u.v8);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	if (ndev->bar4_split) {
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT4 -\t\t\t%#06x\n", u.v32);

		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT5 -\t\t\t%#06x\n", u.v32);
	} else {
		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT45 -\t\t%#018llx\n", u.v64);
	}

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	if (ndev->bar4_split) {
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT4 -\t\t\t%#06x\n", u.v32);
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT5 -\t\t\t%#06x\n", u.v32);
	} else {
		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT45 -\t\t\t%#018llx\n", u.v64);
	}

	if (pdev_is_xeon(pdev)) {
		if (ntb_topo_is_b2b(ndev->ntb.topo)) {
			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Outgoing B2B XLAT:\n");

			u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT4 -\t\t%#06x\n",
						 u.v32);
				u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT5 -\t\t%#06x\n",
						 u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT45 -\t\t%#018llx\n",
						 u.v64);
			}

			u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT4 -\t\t%#06x\n",
						 u.v32);
				u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT5 -\t\t%#06x\n",
						 u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT45 -\t\t%#018llx\n",
						 u.v64);
			}

			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Secondary BAR:\n");

			u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR01 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR4 -\t\t\t%#06x\n", u.v32);
				u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR5 -\t\t\t%#06x\n", u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR45 -\t\t%#018llx\n",
						 u.v64);
			}
		}

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Statistics:\n");

		u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "Upstream Memory Miss -\t%u\n", u.v16);

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Hardware Errors:\n");

		if (!pci_read_config_word(pdev,
					  XEON_DEVSTS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "DEVSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_word(pdev,
					  XEON_LINK_STATUS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "LNKSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_dword(pdev,
					   XEON_UNCERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "UNCERRSTS -\t\t%#06x\n", u.v32);

		if (!pci_read_config_dword(pdev,
					   XEON_CORERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "CORERRSTS -\t\t%#06x\n", u.v32);
	}

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev = filp->private_data;

	if (pdev_is_xeon(ndev->ntb.pdev) ||
	    pdev_is_atom(ndev->ntb.pdev))
		return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
	else if (pdev_is_skx_xeon(ndev->ntb.pdev))
		return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);

	return -ENXIO;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(ndev_name(ndev), debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &intel_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->mw_count;
}

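/*
 * When a memory window is stolen for b2b peer access and not shared
 * (b2b_off == 0), it is hidden from clients: client indexes at or above
 * b2b_idx are shifted up by one to skip it.  When shared, b2b_off
 * carves the client-usable half out of the same BAR instead.
 */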
static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
				  phys_addr_t *base,
				  resource_size_t *size,
				  resource_size_t *align,
				  resource_size_t *align_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar) +
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar) -
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (align)
		*align = pci_resource_len(ndev->ntb.pdev, bar);

	if (align_size)
		*align_size = 1;

	return 0;
}

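/*
 * Program an inbound translation.  The hardware requires the DMA address
 * to be aligned to the BAR size, and each register write is read back
 * and verified, unwinding to a safe state on mismatch.  A limit
 * register, where present, clamps the window when the requested size is
 * smaller than the full mw.
 */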
static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
				  dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long base_reg, xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

	if (bar < 4 || !ndev->bar4_split) {
		base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite64(addr, mmio + xlat_reg);
		reg_val = ioread64(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite64(limit, mmio + limit_reg);
		reg_val = ioread64(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}
	} else {
		/* split bar addr range must all be 32 bit */
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite32(addr, mmio + xlat_reg);
		reg_val = ioread32(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite32(limit, mmio + limit_reg);
		reg_val = ioread32(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite32(base, mmio + limit_reg);
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}

static int intel_ntb_link_is_up(struct ntb_dev *ntb,
				enum ntb_speed *speed,
				enum ntb_width *width)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (ndev->reg->link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
		return 1;
	} else {
		/* TODO MAYBE: is it possible to observe the link speed and
		 * width while link is training? */
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;
		return 0;
	}
}

static int intel_ntb_link_enable(struct ntb_dev *ntb,
				 enum ntb_speed max_speed,
				 enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev),
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);
	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	if (ndev->bar4_split)
		ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev), "Disabling link\n");

	/* Bring NTB link down */
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
	if (ndev->bar4_split)
		ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
	ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->db_vec_count;
}

static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector > ndev->db_vec_count)
		return 0;

	return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_bell);
}

static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_set_mask(ndev, db_bits,
				ndev->self_mmio +
				ndev->self_reg->db_mask);
}

static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_clear_mask(ndev, db_bits,
				  ndev->self_mmio +
				  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
				  phys_addr_t *db_addr,
				  resource_size_t *db_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
			    ndev->peer_reg->db_bell);
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->peer_mmio +
			     ndev->peer_reg->db_bell);
}

static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

static int intel_ntb_spad_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->spad_count;
}

static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->self_mmio +
			      ndev->self_reg->spad);
}

static int intel_ntb_spad_write(struct ntb_dev *ntb,
				int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->self_mmio +
			       ndev->self_reg->spad);
}

static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *spad_addr)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
			      ndev->peer_reg->spad);
}

static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->peer_mmio +
			      ndev->peer_reg->spad);
}

static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
				     int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->peer_mmio +
			       ndev->peer_reg->spad);
}

/* ATOM */

static u64 atom_db_ioread(void __iomem *mmio)
{
	return ioread64(mmio);
}

static void atom_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite64(bits, mmio);
}

static int atom_poll_link(struct intel_ntb_dev *ndev)
{
	u32 ntb_ctl;

	ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);

	if (ntb_ctl == ndev->ntb_ctl)
		return 0;

	ndev->ntb_ctl = ntb_ctl;

	ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);

	return 1;
}

static int atom_link_is_up(struct intel_ntb_dev *ndev)
{
	return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
}

static int atom_link_is_err(struct intel_ntb_dev *ndev)
{
	if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
	    & ATOM_LTSSMSTATEJMP_FORCEDETECT)
		return 1;

	if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
	    & ATOM_IBIST_ERR_OFLOW)
		return 1;

	return 0;
}

static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
	switch (ppd & ATOM_PPD_TOPO_MASK) {
	case ATOM_PPD_TOPO_B2B_USD:
		dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
		return NTB_TOPO_B2B_USD;

	case ATOM_PPD_TOPO_B2B_DSD:
		dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
		return NTB_TOPO_B2B_DSD;

	case ATOM_PPD_TOPO_PRI_USD:
	case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
	case ATOM_PPD_TOPO_SEC_USD:
	case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
		return NTB_TOPO_NONE;
	}

	dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
	return NTB_TOPO_NONE;
}

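/*
 * Atom has no link status interrupt, so a delayed-work heartbeat polls
 * the link.  If the link is down with an error latched, it runs the
 * ModPhy/LTSSM recovery sequence below and rearms itself with a random
 * backoff to avoid both ends recovering in lockstep.
 */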
Dave Jiang2f887b92015-05-20 12:55:47 -04001466static void atom_link_hb(struct work_struct *work)
Allen Hubbee26a5842015-04-09 10:33:20 -04001467{
1468 struct intel_ntb_dev *ndev = hb_ndev(work);
1469 unsigned long poll_ts;
1470 void __iomem *mmio;
1471 u32 status32;
1472
Dave Jiang2f887b92015-05-20 12:55:47 -04001473 poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
Allen Hubbee26a5842015-04-09 10:33:20 -04001474
1475 /* Delay polling the link status if an interrupt was received,
1476 * unless the cached link status says the link is down.
1477 */
Dave Jiang2f887b92015-05-20 12:55:47 -04001478 if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
Allen Hubbee26a5842015-04-09 10:33:20 -04001479 schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
1480 return;
1481 }
1482
Dave Jiang2f887b92015-05-20 12:55:47 -04001483 if (atom_poll_link(ndev))
Allen Hubbee26a5842015-04-09 10:33:20 -04001484 ntb_link_event(&ndev->ntb);
1485
Dave Jiang2f887b92015-05-20 12:55:47 -04001486 if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
1487 schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
Allen Hubbee26a5842015-04-09 10:33:20 -04001488 return;
1489 }
1490
1491 /* Link is down with error: recover the link! */
1492
1493 mmio = ndev->self_mmio;
1494
1495 /* Driver resets the NTB ModPhy lanes - magic! */
Dave Jiang2f887b92015-05-20 12:55:47 -04001496 iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
1497 iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
1498 iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
1499 iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);
Allen Hubbee26a5842015-04-09 10:33:20 -04001500
1501 /* Driver waits 100ms to allow the NTB ModPhy to settle */
1502 msleep(100);
1503
1504 /* Clear AER Errors, write to clear */
Dave Jiang2f887b92015-05-20 12:55:47 -04001505 status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001506 dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
1507 status32 &= PCI_ERR_COR_REP_ROLL;
Dave Jiang2f887b92015-05-20 12:55:47 -04001508 iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001509
1510 /* Clear unexpected electrical idle event in LTSSM, write to clear */
Dave Jiang2f887b92015-05-20 12:55:47 -04001511 status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001512 dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
Dave Jiang2f887b92015-05-20 12:55:47 -04001513 status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
1514 iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001515
1516 /* Clear DeSkew Buffer error, write to clear */
Dave Jiang2f887b92015-05-20 12:55:47 -04001517 status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001518 dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
Dave Jiang2f887b92015-05-20 12:55:47 -04001519 status32 |= ATOM_DESKEWSTS_DBERR;
1520 iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001521
Dave Jiang2f887b92015-05-20 12:55:47 -04001522 status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001523 dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
Dave Jiang2f887b92015-05-20 12:55:47 -04001524 status32 &= ATOM_IBIST_ERR_OFLOW;
1525 iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001526
1527 /* Releases the NTB state machine to allow the link to retrain */
Dave Jiang2f887b92015-05-20 12:55:47 -04001528 status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001529 dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
Dave Jiang2f887b92015-05-20 12:55:47 -04001530 status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
1531 iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
Allen Hubbee26a5842015-04-09 10:33:20 -04001532
1533 /* There is a potential race between the 2 NTB devices recovering at the
1534 * same time. If the times are the same, the link will not recover and
1535 * the driver will be stuck in this loop forever. Add a random interval
1536 * to the recovery time to prevent this race.
1537 */
Dave Jiang2f887b92015-05-20 12:55:47 -04001538 schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
1539 + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
Allen Hubbee26a5842015-04-09 10:33:20 -04001540}

static int atom_init_isr(struct intel_ntb_dev *ndev)
{
	int rc;

	rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
			   ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
	if (rc)
		return rc;

	/* ATOM doesn't have a link status interrupt; poll the link on that
	 * platform instead.
	 */
	ndev->last_ts = jiffies;
	INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
	schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);

	return 0;
}
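
/*
 * The heartbeat pattern set up above and torn down in atom_deinit_isr(),
 * reduced to its skeleton (illustrative sketch only; the surrounding driver
 * state is elided):
 *
 *	INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
 *	schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
 *
 * atom_link_hb() then re-arms itself with the same timeout while the link is
 * healthy, or with the randomized recovery delay when it is not, and
 * cancel_delayed_work_sync() stops the cycle on teardown.
 */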

static void atom_deinit_isr(struct intel_ntb_dev *ndev)
{
	cancel_delayed_work_sync(&ndev->hb_timer);
	ndev_deinit_isr(ndev);
}

static int atom_init_ntb(struct intel_ntb_dev *ndev)
{
	ndev->mw_count = ATOM_MW_COUNT;
	ndev->spad_count = ATOM_SPAD_COUNT;
	ndev->db_count = ATOM_DB_COUNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &atom_pri_reg;
		ndev->peer_reg = &atom_b2b_reg;
		ndev->xlat_reg = &atom_sec_xlat;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + ATOM_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	return 0;
}

static int atom_init_dev(struct intel_ntb_dev *ndev)
{
	u32 ppd;
	int rc;

	rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	rc = atom_init_ntb(ndev);
	if (rc)
		return rc;

	rc = atom_init_isr(ndev);
	if (rc)
		return rc;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		/* Initiate PCI-E link training */
		rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
					    ppd | ATOM_PPD_INIT_LINK);
		if (rc)
			return rc;
	}

	return 0;
}

static void atom_deinit_dev(struct intel_ntb_dev *ndev)
{
	atom_deinit_isr(ndev);
}

/* Skylake Xeon NTB */

static u64 skx_db_ioread(void __iomem *mmio)
{
	return ioread64(mmio);
}

static void skx_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite64(bits, mmio);
}

static int skx_init_isr(struct intel_ntb_dev *ndev)
{
	int i;

	/*
	 * The MSI-X vectors and the interrupt status bits are not lined up
	 * on Skylake.  By default the link status bit is bit 32, yet it is
	 * routed to MSI-X vector 0.  The vector mapping at reset is 1-32,0;
	 * reprogram it to the identity mapping 0-32 so the vectors and
	 * status bits line up.
	 */

	for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++)
		iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i);

	/* move link status down one as workaround */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
		iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2,
			 ndev->self_mmio + SKX_INTVEC_OFFSET +
			 (SKX_DB_MSIX_VECTOR_COUNT - 1));
	}

	return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT,
			     SKX_DB_MSIX_VECTOR_COUNT,
			     SKX_DB_MSIX_VECTOR_SHIFT,
			     SKX_DB_TOTAL_SHIFT);
}
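
/*
 * Spelled out, the remap above (a reading of the code, not a datasheet
 * quote): the reset mapping routes interrupt source i to MSI-X vector
 * (i + 1) mod SKX_DB_MSIX_VECTOR_COUNT, i.e. 1-32,0, which leaves the link
 * status source (the highest one) on vector 0.  Writing i into INTVEC
 * entry i restores the identity mapping.  On parts with the vector-32
 * erratum, the last source is then steered down onto the second-to-last
 * vector instead of its own.
 */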

static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
			    const struct intel_b2b_addr *addr,
			    const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev_pdev(ndev);
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx == UINT_MAX) {
		dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
			dev_dbg(ndev_dev(ndev),
				"b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (bar_size >= XEON_B2B_MIN_SIZE) {
			dev_dbg(ndev_dev(ndev),
				"b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(ndev_dev(ndev),
				"b2b bar size is too small\n");
			return -EIO;
		}
	}

	/*
	 * Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.
	 */
	pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "IMBAR1SZ %#x\n", bar_sz);
	if (b2b_bar == 1) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}

	pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "EMBAR1SZ %#x\n", bar_sz);

	pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "IMBAR2SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}

	pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "EMBAR2SZ %#x\n", bar_sz);

	/* SBAR01 hit by first part of the b2b bar */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 1)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 2)
		bar_addr = addr->bar4_addr64;
	else
		return -EIO;

	/* setup incoming bar limits == base addrs (zero length windows) */
	bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
	bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
	dev_dbg(ndev_dev(ndev), "IMBAR1XLMT %#018llx\n", bar_addr);

	bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
	bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
	dev_dbg(ndev_dev(ndev), "IMBAR2XLMT %#018llx\n", bar_addr);

	/* zero incoming translation addrs */
	iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
	iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET);

	ndev->peer_mmio = ndev->self_mmio;

	return 0;
}
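
/*
 * Note on the "zero length windows" above: with the inbound limit
 * programmed equal to the translated base there is no addressable range,
 * so the window stays closed until intel_ntb3_mw_set_trans() later
 * installs a real translation and limit.
 */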

static int skx_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;

	ndev->mw_count = XEON_MW_COUNT;
	ndev->spad_count = SKX_SPAD_COUNT;
	ndev->db_count = SKX_DB_COUNT;
	ndev->db_link_mask = SKX_DB_LINK_BIT;

	/* DB fixup: bit 31 also carries link status while vector 32 is bad */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
		ndev->db_link_mask |= BIT_ULL(31);

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &skx_pri_reg;
		ndev->peer_reg = &skx_b2b_reg;
		ndev->xlat_reg = &skx_sec_xlat;

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = skx_setup_b2b_mw(ndev,
					      &xeon_b2b_dsd_addr,
					      &xeon_b2b_usd_addr);
		} else {
			rc = skx_setup_b2b_mw(ndev,
					      &xeon_b2b_usd_addr,
					      &xeon_b2b_dsd_addr);
		}

		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + SKX_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

static int skx_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc;

	pdev = ndev_pdev(ndev);

	ndev->reg = &skx_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (pdev_is_skx_xeon(pdev))
		ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;

	rc = skx_init_ntb(ndev);
	if (rc)
		return rc;

	return skx_init_isr(ndev);
}

static int intel_ntb3_link_enable(struct ntb_dev *ntb,
				  enum ntb_speed max_speed,
				  enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(ndev_dev(ndev),
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);

	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
				   dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
	base = pci_resource_start(ndev->ntb.pdev, bar);

	/* Set the limit if supported and size is not the whole mw_size */
	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the translation address */
	iowrite64(addr, mmio + xlat_reg);
	reg_val = ioread64(mmio + xlat_reg);
	if (reg_val != addr) {
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(ndev_dev(ndev), "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(ndev_dev(ndev), "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);

	/* setup the EP */
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
	base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx));
	base &= ~0xf;

	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(ndev_dev(ndev), "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);

	return 0;
}
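
/*
 * How a client reaches the function above through the common NTB API
 * (hypothetical sketch; the buffer names and error handling are not from
 * this driver):
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(&ntb->pdev->dev, size, &dma,
 *				       GFP_KERNEL);
 *
 *	rc = ntb_mw_set_trans(ntb, 0, dma, size);
 *
 * ntb_mw_set_trans() dispatches through ntb->ops, which for Skylake
 * hardware is intel_ntb3_ops below, landing here.  The dma address must be
 * aligned to the BAR size, and size must fit within the usable mw.
 */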

static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bit;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	while (db_bits) {
		bit = __ffs(db_bits);
		iowrite32(1, ndev->peer_mmio +
			  ndev->peer_reg->db_bell + (bit * 4));
		db_bits &= db_bits - 1;
	}

	return 0;
}
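
/*
 * The loop above is the standard lowest-set-bit walk: __ffs() picks the
 * lowest pending doorbell and "db_bits &= db_bits - 1" clears exactly that
 * bit, so each requested doorbell becomes one 32-bit write to its own bell
 * register.  For example, db_bits == 0x5 rings bells 0 and 2 with two
 * writes.
 */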

static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_clear);
}

static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_clear);
}

/* XEON */

static u64 xeon_db_ioread(void __iomem *mmio)
{
	return (u64)ioread16(mmio);
}

static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite16((u16)bits, mmio);
}

static int xeon_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_bell);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  XEON_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

static int xeon_link_is_up(struct intel_ntb_dev *ndev)
{
	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return 1;

	return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}

static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
	switch (ppd & XEON_PPD_TOPO_MASK) {
	case XEON_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;

	case XEON_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;

	case XEON_PPD_TOPO_PRI_USD:
	case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
		return NTB_TOPO_PRI;

	case XEON_PPD_TOPO_SEC_USD:
	case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		return NTB_TOPO_SEC;
	}

	return NTB_TOPO_NONE;
}

static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
	if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
		dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
		return 1;
	}
	return 0;
}

static int xeon_init_isr(struct intel_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_SHIFT,
			     XEON_DB_TOTAL_SHIFT);
}

static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
{
	ndev_deinit_isr(ndev);
}

static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev_pdev(ndev);
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx == UINT_MAX) {
		dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
			dev_dbg(ndev_dev(ndev),
				"b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (XEON_B2B_MIN_SIZE <= bar_size) {
			dev_dbg(ndev_dev(ndev),
				"b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(ndev_dev(ndev),
				"b2b bar size is too small\n");
			return -EIO;
		}
	}

	/* Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.
	 *
	 * Note: there is code for each specific bar size register because
	 * the register offsets are not in a consistent order (bar5sz comes
	 * after ppd, oddly).
	 */
	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}
	pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);

	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);

		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
		if (b2b_bar == 5) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
	}

	/* SBAR01 hit by first part of the b2b bar */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = addr->bar5_addr32;
	else
		return -EIO;

	dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
	iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);

	/* Other SBARs are normally hit by the PBAR xlat, except for the b2b
	 * bar.  The b2b bar is either disabled above or configured half-size,
	 * and it starts at the PBAR xlat + offset.
	 */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
	}

	/* setup incoming bar limits == base addrs (zero length windows) */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr);
	}

	/* zero incoming translation addrs */
	iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
	}

	/* zero outgoing translation limits (whole bar size windows) */
	iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
	}

	/* set outgoing translation offsets */
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
	dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
	}

	/* set the translation offset for b2b registers */
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

	/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
	dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
		/* map peer ntb mmio config space registers */
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    XEON_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;

		ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
	}

	return 0;
}
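
/*
 * B2B window sharing in the function above, summarized (the constants are
 * defined in ntb_hw_intel.h): with b2b_mw_share set and a b2b BAR of size S
 * where S/2 >= XEON_B2B_MIN_SIZE, the peer's registers take the first half
 * of the BAR (b2b_off = S/2) and the second half remains a usable memory
 * window; otherwise the whole BAR is given over to the peer registers and
 * mw_count shrinks by one.
 */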

static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;
	u32 ntb_ctl;

	if (ndev->bar4_split)
		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
	else
		ndev->mw_count = XEON_MW_COUNT;

	ndev->spad_count = XEON_SPAD_COUNT;
	ndev->db_count = XEON_DB_COUNT;
	ndev->db_link_mask = XEON_DB_LINK_BIT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
			return -EINVAL;
		}

		/* enable link to allow secondary side device to appear */
		ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
		ntb_ctl &= ~NTB_CTL_DISABLE;
		iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_sec_reg;
		ndev->xlat_reg = &xeon_sec_xlat;
		break;

	case NTB_TOPO_SEC:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
			return -EINVAL;
		}
		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_sec_reg;
		ndev->peer_reg = &xeon_pri_reg;
		ndev->xlat_reg = &xeon_pri_xlat;
		break;

	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_b2b_reg;
		ndev->xlat_reg = &xeon_sec_xlat;

		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			ndev->peer_reg = &xeon_pri_reg;

			if (b2b_mw_idx < 0)
				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
			else
				ndev->b2b_idx = b2b_mw_idx;

			if (ndev->b2b_idx >= ndev->mw_count) {
				dev_dbg(ndev_dev(ndev),
					"b2b_mw_idx %d invalid for mw_count %u\n",
					b2b_mw_idx, ndev->mw_count);
				return -EINVAL;
			}

			dev_dbg(ndev_dev(ndev),
				"setting up b2b mw idx %d means %d\n",
				b2b_mw_idx, ndev->b2b_idx);

		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
			dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
			ndev->db_count -= 1;
		}

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_dsd_addr,
					       &xeon_b2b_usd_addr);
		} else {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_usd_addr,
					       &xeon_b2b_dsd_addr);
		}
		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + XEON_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}
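
/*
 * Note on the SDOORBELL_LOCKUP branch above: with that erratum, the peer's
 * doorbell and scratchpad registers cannot be touched through the
 * secondary/b2b register set without risking a hang, so peer_reg is pointed
 * at xeon_pri_reg and the accesses instead go through the b2b memory window
 * (ndev->b2b_idx) that xeon_setup_b2b_mw() maps onto the peer's primary
 * registers.
 */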

static int xeon_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc, mem;

	pdev = ndev_pdev(ndev);

	switch (pdev->device) {
	/* There is a Xeon hardware erratum related to writes to SDOORBELL or
	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
	 * which may hang the system.  To work around this, use the second
	 * memory window to access the interrupt and scratch pad registers
	 * on the remote system.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* There is a hardware erratum related to accessing any register in
	 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* HW erratum on bit 14 of the b2bdoorbell register.  Writes will not
	 * be mirrored to the remote system.  Shrink the number of bits by
	 * one, since bit 14 is the last bit.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
		break;
	}

	ndev->reg = &xeon_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
		dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
			ppd, ndev->bar4_split);
	} else {
		/* This is a way for the transparent BAR side to figure out
		 * whether we are doing split BAR or not.  There is no way
		 * for the hw on the transparent side to know and set the PPD.
		 */
		mem = pci_select_bars(pdev, IORESOURCE_MEM);
		ndev->bar4_split = hweight32(mem) ==
			HSX_SPLIT_BAR_MW_COUNT + 1;
		dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
			mem, ndev->bar4_split);
	}

	rc = xeon_init_ntb(ndev);
	if (rc)
		return rc;

	return xeon_init_isr(ndev);
}

static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
{
	xeon_deinit_isr(ndev);
}

static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_mmio;
	}
	ndev->peer_mmio = ndev->self_mmio;
	ndev->peer_addr = pci_resource_start(pdev, 0);

	return 0;

err_mmio:
err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}
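
/*
 * The DMA-mask sequence above is the usual 64-then-32-bit fallback of this
 * kernel era: failing both masks is fatal, while succeeding only with
 * DMA_BIT_MASK(32) merely restricts DMA to the low 4 GiB, hence the
 * "Cannot DMA highmem" warnings rather than hard errors.
 */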

static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev_pdev(ndev);

	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
		pci_iounmap(pdev, ndev->peer_mmio);
	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &intel_ntb_ops;

	ndev->b2b_off = 0;
	ndev->b2b_idx = UINT_MAX;

	ndev->bar4_split = 0;

	ndev->mw_count = 0;
	ndev->spad_count = 0;
	ndev->db_count = 0;
	ndev->db_vec_count = 0;
	ndev->db_vec_shift = 0;

	ndev->ntb_ctl = 0;
	ndev->lnk_sta = 0;

	ndev->db_valid_mask = 0;
	ndev->db_link_mask = 0;
	ndev->db_mask = 0;

	spin_lock_init(&ndev->db_mask_lock);
}

static int intel_ntb_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct intel_ntb_dev *ndev;
	int rc, node;

	node = dev_to_node(&pdev->dev);

	if (pdev_is_atom(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = atom_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_xeon(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = xeon_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_skx_xeon(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);
		ndev->ntb.ops = &intel_ntb3_ops;

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = skx_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else {
		rc = -EINVAL;
		goto err_ndev;
	}

	ndev_reset_unsafe_flags(ndev);

	ndev->reg->poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	if (pdev_is_atom(pdev))
		atom_deinit_dev(ndev);
	else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
		xeon_deinit_dev(ndev);
err_init_dev:
	intel_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	if (pdev_is_atom(pdev))
		atom_deinit_dev(ndev);
	else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
		xeon_deinit_dev(ndev);
	intel_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static const struct intel_ntb_reg atom_reg = {
	.poll_link	= atom_poll_link,
	.link_is_up	= atom_link_is_up,
	.db_ioread	= atom_db_ioread,
	.db_iowrite	= atom_db_iowrite,
	.db_size	= sizeof(u64),
	.ntb_ctl	= ATOM_NTBCNTL_OFFSET,
	.mw_bar		= {2, 4},
};

static const struct intel_ntb_alt_reg atom_pri_reg = {
	.db_bell	= ATOM_PDOORBELL_OFFSET,
	.db_mask	= ATOM_PDBMSK_OFFSET,
	.spad		= ATOM_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg atom_b2b_reg = {
	.db_bell	= ATOM_B2B_DOORBELL_OFFSET,
	.spad		= ATOM_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg atom_sec_xlat = {
	/* FIXME : .bar0_base	= ATOM_SBAR0BASE_OFFSET, */
	/* FIXME : .bar2_limit	= ATOM_SBAR2LMT_OFFSET, */
	.bar2_xlat	= ATOM_SBAR2XLAT_OFFSET,
};

static const struct intel_ntb_reg xeon_reg = {
	.poll_link	= xeon_poll_link,
	.link_is_up	= xeon_link_is_up,
	.db_ioread	= xeon_db_ioread,
	.db_iowrite	= xeon_db_iowrite,
	.db_size	= sizeof(u32),
	.ntb_ctl	= XEON_NTBCNTL_OFFSET,
	.mw_bar		= {2, 4, 5},
};

static const struct intel_ntb_alt_reg xeon_pri_reg = {
	.db_bell	= XEON_PDOORBELL_OFFSET,
	.db_mask	= XEON_PDBMSK_OFFSET,
	.spad		= XEON_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg xeon_sec_reg = {
	.db_bell	= XEON_SDOORBELL_OFFSET,
	.db_mask	= XEON_SDBMSK_OFFSET,
	/* second half of the scratchpads */
	.spad		= XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
};

static const struct intel_ntb_alt_reg xeon_b2b_reg = {
	.db_bell	= XEON_B2B_DOORBELL_OFFSET,
	.spad		= XEON_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
	/* Note: no primary .bar0_base visible to the secondary side.
	 *
	 * The secondary side cannot get the base address stored in primary
	 * bars.  The base address is necessary to set the limit register to
	 * any value other than zero, or unlimited.
	 *
	 * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
	 * window by setting the limit equal to base, nor can it limit the
	 * size of the memory window by setting the limit to base + size.
	 */
	.bar2_limit	= XEON_PBAR23LMT_OFFSET,
	.bar2_xlat	= XEON_PBAR23XLAT_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base	= XEON_SBAR0BASE_OFFSET,
	.bar2_limit	= XEON_SBAR23LMT_OFFSET,
	.bar2_xlat	= XEON_SBAR23XLAT_OFFSET,
};

static struct intel_b2b_addr xeon_b2b_usd_addr = {
	.bar2_addr64	= XEON_B2B_BAR2_ADDR64,
	.bar4_addr64	= XEON_B2B_BAR4_ADDR64,
	.bar4_addr32	= XEON_B2B_BAR4_ADDR32,
	.bar5_addr32	= XEON_B2B_BAR5_ADDR32,
};

static struct intel_b2b_addr xeon_b2b_dsd_addr = {
	.bar2_addr64	= XEON_B2B_BAR2_ADDR64,
	.bar4_addr64	= XEON_B2B_BAR4_ADDR64,
	.bar4_addr32	= XEON_B2B_BAR4_ADDR32,
	.bar5_addr32	= XEON_B2B_BAR5_ADDR32,
};

static const struct intel_ntb_reg skx_reg = {
	.poll_link	= xeon_poll_link,
	.link_is_up	= xeon_link_is_up,
	.db_ioread	= skx_db_ioread,
	.db_iowrite	= skx_db_iowrite,
	.db_size	= sizeof(u64),
	.ntb_ctl	= SKX_NTBCNTL_OFFSET,
	.mw_bar		= {2, 4},
};

static const struct intel_ntb_alt_reg skx_pri_reg = {
	.db_bell	= SKX_EM_DOORBELL_OFFSET,
	.db_clear	= SKX_IM_INT_STATUS_OFFSET,
	.db_mask	= SKX_IM_INT_DISABLE_OFFSET,
	.spad		= SKX_IM_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg skx_b2b_reg = {
	.db_bell	= SKX_IM_DOORBELL_OFFSET,
	.db_clear	= SKX_EM_INT_STATUS_OFFSET,
	.db_mask	= SKX_EM_INT_DISABLE_OFFSET,
	.spad		= SKX_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg skx_sec_xlat = {
	/* .bar0_base	= SKX_EMBAR0_OFFSET, */
	.bar2_limit	= SKX_IMBAR1XLMT_OFFSET,
	.bar2_xlat	= SKX_IMBAR1XBASE_OFFSET,
};

/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
	.mw_count	= intel_ntb_mw_count,
	.mw_get_range	= intel_ntb_mw_get_range,
	.mw_set_trans	= intel_ntb_mw_set_trans,
	.link_is_up	= intel_ntb_link_is_up,
	.link_enable	= intel_ntb_link_enable,
	.link_disable	= intel_ntb_link_disable,
	.db_is_unsafe	= intel_ntb_db_is_unsafe,
	.db_valid_mask	= intel_ntb_db_valid_mask,
	.db_vector_count = intel_ntb_db_vector_count,
	.db_vector_mask	= intel_ntb_db_vector_mask,
	.db_read	= intel_ntb_db_read,
	.db_clear	= intel_ntb_db_clear,
	.db_set_mask	= intel_ntb_db_set_mask,
	.db_clear_mask	= intel_ntb_db_clear_mask,
	.peer_db_addr	= intel_ntb_peer_db_addr,
	.peer_db_set	= intel_ntb_peer_db_set,
	.spad_is_unsafe	= intel_ntb_spad_is_unsafe,
	.spad_count	= intel_ntb_spad_count,
	.spad_read	= intel_ntb_spad_read,
	.spad_write	= intel_ntb_spad_write,
	.peer_spad_addr	= intel_ntb_peer_spad_addr,
	.peer_spad_read	= intel_ntb_peer_spad_read,
	.peer_spad_write = intel_ntb_peer_spad_write,
};

static const struct ntb_dev_ops intel_ntb3_ops = {
	.mw_count	= intel_ntb_mw_count,
	.mw_get_range	= intel_ntb_mw_get_range,
	.mw_set_trans	= intel_ntb3_mw_set_trans,
	.link_is_up	= intel_ntb_link_is_up,
	.link_enable	= intel_ntb3_link_enable,
	.link_disable	= intel_ntb_link_disable,
	.db_valid_mask	= intel_ntb_db_valid_mask,
	.db_vector_count = intel_ntb_db_vector_count,
	.db_vector_mask	= intel_ntb_db_vector_mask,
	.db_read	= intel_ntb3_db_read,
	.db_clear	= intel_ntb3_db_clear,
	.db_set_mask	= intel_ntb_db_set_mask,
	.db_clear_mask	= intel_ntb_db_clear_mask,
	.peer_db_addr	= intel_ntb_peer_db_addr,
	.peer_db_set	= intel_ntb3_peer_db_set,
	.spad_is_unsafe	= intel_ntb_spad_is_unsafe,
	.spad_count	= intel_ntb_spad_count,
	.spad_read	= intel_ntb_spad_read,
	.spad_write	= intel_ntb_spad_write,
	.peer_spad_addr	= intel_ntb_peer_spad_addr,
	.peer_spad_read	= intel_ntb_peer_spad_read,
	.peer_spad_write = intel_ntb_peer_spad_write,
};
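
/*
 * Comparing the two ops tables: intel_ntb3_ops overrides only the callbacks
 * that changed on Skylake (64-bit doorbell read/clear through the split
 * status registers, per-bit peer bell writes, IMBAR-based translation, and
 * the snoop-enabling link enable) and omits .db_is_unsafe; everything else
 * is shared with the legacy intel_ntb_ops.
 */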

static const struct file_operations intel_ntb_debugfs_info = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= ndev_debugfs_read,
};

static const struct pci_device_id intel_ntb_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
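
/*
 * The device table exported above lets udev/modprobe autoload this module
 * when any listed NTB function appears on the PCI bus; binding then enters
 * intel_ntb_pci_probe() with the matching id.
 */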

static struct pci_driver intel_ntb_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= intel_ntb_pci_tbl,
	.probe		= intel_ntb_pci_probe,
	.remove		= intel_ntb_pci_remove,
};

static int __init intel_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);

static void __exit intel_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&intel_ntb_pci_driver);

	debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);