blob: 6aa57322727916bd5bc1c8e5ab13f286f8fd1b1d [file] [log] [blame]
Jon Masonfce8a7b2012-11-16 19:27:12 -07001/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
Allen Hubbee26a5842015-04-09 10:33:20 -04008 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
Serge Semin443b9a12017-01-11 03:11:33 +03009 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
Jon Masonfce8a7b2012-11-16 19:27:12 -070010 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * BSD LICENSE
16 *
17 * Copyright(c) 2012 Intel Corporation. All rights reserved.
Allen Hubbee26a5842015-04-09 10:33:20 -040018 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
Serge Semin443b9a12017-01-11 03:11:33 +030019 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
Jon Masonfce8a7b2012-11-16 19:27:12 -070020 *
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
23 * are met:
24 *
25 * * Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * * Redistributions in binary form must reproduce the above copy
28 * notice, this list of conditions and the following disclaimer in
29 * the documentation and/or other materials provided with the
30 * distribution.
31 * * Neither the name of Intel Corporation nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
34 *
35 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 *
47 * Intel PCIe NTB Linux driver
Jon Masonfce8a7b2012-11-16 19:27:12 -070048 */
Allen Hubbee26a5842015-04-09 10:33:20 -040049
Jon Masonfce8a7b2012-11-16 19:27:12 -070050#include <linux/debugfs.h>
Jon Mason113bf1c2012-11-16 18:52:57 -070051#include <linux/delay.h>
Jon Masonfce8a7b2012-11-16 19:27:12 -070052#include <linux/init.h>
53#include <linux/interrupt.h>
54#include <linux/module.h>
55#include <linux/pci.h>
Jon Mason113bf1c2012-11-16 18:52:57 -070056#include <linux/random.h>
Jon Masonfce8a7b2012-11-16 19:27:12 -070057#include <linux/slab.h>
Allen Hubbee26a5842015-04-09 10:33:20 -040058#include <linux/ntb.h>
59
Dave Jiangf6e51c32018-01-29 13:22:24 -070060#include "ntb_hw_intel.h"
Dave Jianga9065052018-01-29 13:22:18 -070061#include "ntb_hw_gen1.h"
62#include "ntb_hw_gen3.h"
Jon Masonfce8a7b2012-11-16 19:27:12 -070063
/* Module identity strings used in MODULE_* metadata below. */
#define NTB_NAME	"ntb_hw_intel"
#define NTB_DESC	"Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

/*
 * BAR registers are laid out 4 bytes apart, starting with BAR 0 (bar0_off)
 * or starting with BAR 2 (bar2_off) depending on the register bank.
 */
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

/* Xeon register-layout descriptors; definitions appear later in the file. */
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;

static const struct file_operations intel_ntb_debugfs_info;
/* Root debugfs directory for all devices handled by this driver. */
static struct dentry *debugfs_dir;

/*
 * Memory window index used to reach the peer NTB in B2B topology.
 * Negative values count from the last window.  Must match on both sides.
 */
static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
		 "value of zero or positive starts from first mw idx, and a "
		 "negative value starts from last mw idx. Both sides MUST "
		 "set the same value here!");

/* If nonzero, split the B2B window so half remains usable as a real mw. */
static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
		 "ntb so that the peer ntb only occupies the first half of "
		 "the mw, so the second half can still be used as a mw. Both "
		 "sides MUST set the same value here!");

/*
 * Upstream-side (USD) and downstream-side (DSD) B2B BAR addresses.
 * xeon_b2b_usd_addr / xeon_b2b_dsd_addr are declared in ntb_hw_gen1.h.
 */
module_param_named(xeon_b2b_usd_bar2_addr64,
		   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
		 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
		   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
		 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
		   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
		 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
		   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
		 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
		   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
		 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
		   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
		 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
		   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
		 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
		   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
		 "XEON B2B DSD split-BAR 5 32-bit address");


static int xeon_init_isr(struct intel_ntb_dev *ndev);
143
Allen Hubbee26a5842015-04-09 10:33:20 -0400144static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
145{
146 ndev->unsafe_flags = 0;
147 ndev->unsafe_flags_ignore = 0;
148
149 /* Only B2B has a workaround to avoid SDOORBELL */
150 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
151 if (!ntb_topo_is_b2b(ndev->ntb.topo))
152 ndev->unsafe_flags |= NTB_UNSAFE_DB;
153
154 /* No low level workaround to avoid SB01BASE */
155 if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
156 ndev->unsafe_flags |= NTB_UNSAFE_DB;
157 ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
158 }
159}
160
161static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
162 unsigned long flag)
163{
164 return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
165}
166
167static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
168 unsigned long flag)
169{
170 flag &= ndev->unsafe_flags;
171 ndev->unsafe_flags_ignore |= flag;
172
173 return !!flag;
174}
175
Dave Jiangf6e51c32018-01-29 13:22:24 -0700176int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
Allen Hubbee26a5842015-04-09 10:33:20 -0400177{
Allen Hubbe9a078262015-08-31 09:31:00 -0400178 if (idx < 0 || idx >= ndev->mw_count)
Allen Hubbee26a5842015-04-09 10:33:20 -0400179 return -EINVAL;
180 return ndev->reg->mw_bar[idx];
181}
182
183static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
184 phys_addr_t *db_addr, resource_size_t *db_size,
185 phys_addr_t reg_addr, unsigned long reg)
186{
Dave Jiangfd839bf2015-06-15 08:22:30 -0400187 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
188 pr_warn_once("%s: NTB unsafe doorbell access", __func__);
Allen Hubbee26a5842015-04-09 10:33:20 -0400189
190 if (db_addr) {
191 *db_addr = reg_addr + reg;
Logan Gunthorpe48ea0212017-01-10 17:33:37 -0700192 dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
Allen Hubbee26a5842015-04-09 10:33:20 -0400193 }
194
195 if (db_size) {
196 *db_size = ndev->reg->db_size;
Logan Gunthorpe48ea0212017-01-10 17:33:37 -0700197 dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
Allen Hubbee26a5842015-04-09 10:33:20 -0400198 }
199
200 return 0;
201}
202
Dave Jiangf6e51c32018-01-29 13:22:24 -0700203u64 ndev_db_read(struct intel_ntb_dev *ndev,
Allen Hubbee26a5842015-04-09 10:33:20 -0400204 void __iomem *mmio)
205{
Dave Jiangfd839bf2015-06-15 08:22:30 -0400206 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
207 pr_warn_once("%s: NTB unsafe doorbell access", __func__);
Allen Hubbee26a5842015-04-09 10:33:20 -0400208
209 return ndev->reg->db_ioread(mmio);
210}
211
Dave Jiangf6e51c32018-01-29 13:22:24 -0700212int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
Allen Hubbee26a5842015-04-09 10:33:20 -0400213 void __iomem *mmio)
214{
Dave Jiangfd839bf2015-06-15 08:22:30 -0400215 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
216 pr_warn_once("%s: NTB unsafe doorbell access", __func__);
Allen Hubbee26a5842015-04-09 10:33:20 -0400217
218 if (db_bits & ~ndev->db_valid_mask)
219 return -EINVAL;
220
221 ndev->reg->db_iowrite(db_bits, mmio);
222
223 return 0;
224}
225
226static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
227 void __iomem *mmio)
228{
229 unsigned long irqflags;
230
Dave Jiangfd839bf2015-06-15 08:22:30 -0400231 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
232 pr_warn_once("%s: NTB unsafe doorbell access", __func__);
Allen Hubbee26a5842015-04-09 10:33:20 -0400233
234 if (db_bits & ~ndev->db_valid_mask)
235 return -EINVAL;
236
237 spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
238 {
239 ndev->db_mask |= db_bits;
240 ndev->reg->db_iowrite(ndev->db_mask, mmio);
241 }
242 spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
243
244 return 0;
245}
246
247static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
248 void __iomem *mmio)
249{
250 unsigned long irqflags;
251
Dave Jiangfd839bf2015-06-15 08:22:30 -0400252 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
253 pr_warn_once("%s: NTB unsafe doorbell access", __func__);
Allen Hubbee26a5842015-04-09 10:33:20 -0400254
255 if (db_bits & ~ndev->db_valid_mask)
256 return -EINVAL;
257
258 spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
259 {
260 ndev->db_mask &= ~db_bits;
261 ndev->reg->db_iowrite(ndev->db_mask, mmio);
262 }
263 spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
264
265 return 0;
266}
267
268static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
269{
270 u64 shift, mask;
271
272 shift = ndev->db_vec_shift;
273 mask = BIT_ULL(shift) - 1;
274
275 return mask << (shift * db_vector);
276}
277
278static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
279 phys_addr_t *spad_addr, phys_addr_t reg_addr,
280 unsigned long reg)
281{
Dave Jiangfd839bf2015-06-15 08:22:30 -0400282 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
283 pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
Allen Hubbee26a5842015-04-09 10:33:20 -0400284
285 if (idx < 0 || idx >= ndev->spad_count)
286 return -EINVAL;
287
288 if (spad_addr) {
289 *spad_addr = reg_addr + reg + (idx << 2);
Logan Gunthorpe48ea0212017-01-10 17:33:37 -0700290 dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
291 *spad_addr);
Allen Hubbee26a5842015-04-09 10:33:20 -0400292 }
293
294 return 0;
295}
296
297static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
298 void __iomem *mmio)
299{
Dave Jiangfd839bf2015-06-15 08:22:30 -0400300 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
301 pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
Allen Hubbee26a5842015-04-09 10:33:20 -0400302
303 if (idx < 0 || idx >= ndev->spad_count)
304 return 0;
305
306 return ioread32(mmio + (idx << 2));
307}
308
309static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
310 void __iomem *mmio)
311{
Dave Jiangfd839bf2015-06-15 08:22:30 -0400312 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
313 pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
Allen Hubbee26a5842015-04-09 10:33:20 -0400314
315 if (idx < 0 || idx >= ndev->spad_count)
316 return -EINVAL;
317
318 iowrite32(val, mmio + (idx << 2));
319
320 return 0;
321}
322
/*
 * Common interrupt bottom half, shared by the per-vector MSI-X ISR and
 * the single MSI/INTx ISR.  Determines which doorbell bits this vector
 * services, polls the link when the link bit is covered, and forwards
 * doorbell events to the NTB core.
 */
static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
	u64 vec_mask;

	vec_mask = ndev_vec_mask(ndev, vec);

	/*
	 * On hardware with the MSIX_VECTOR32_BAD erratum, vector 31 also
	 * carries the link doorbell bit, so fold it into this vector's mask.
	 */
	if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
		vec_mask |= ndev->db_link_mask;

	dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);

	/* Record when we last saw any interrupt activity. */
	ndev->last_ts = jiffies;

	if (vec_mask & ndev->db_link_mask) {
		/* Only raise a link event if the link state really changed. */
		if (ndev->reg->poll_link(ndev))
			ntb_link_event(&ndev->ntb);
	}

	if (vec_mask & ndev->db_valid_mask)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}
346
347static irqreturn_t ndev_vec_isr(int irq, void *dev)
348{
349 struct intel_ntb_vec *nvec = dev;
350
Logan Gunthorpe48ea0212017-01-10 17:33:37 -0700351 dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n",
Dave Jiang783dfa62016-11-16 14:03:38 -0700352 irq, nvec->num);
353
Allen Hubbee26a5842015-04-09 10:33:20 -0400354 return ndev_interrupt(nvec->ndev, nvec->num);
355}
356
357static irqreturn_t ndev_irq_isr(int irq, void *dev)
358{
359 struct intel_ntb_dev *ndev = dev;
360
Logan Gunthorpe48ea0212017-01-10 17:33:37 -0700361 return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
Allen Hubbee26a5842015-04-09 10:33:20 -0400362}
363
/*
 * Set up device interrupts, trying progressively simpler mechanisms:
 * MSI-X (between msix_min and msix_max vectors), then a single MSI,
 * then legacy shared INTx.  All doorbells are masked up front so no
 * interrupt can fire before a handler is registered.
 *
 * On success, db_vec_count and db_vec_shift record how doorbell bits
 * map onto the vectors actually obtained (msix_shift bits per vector
 * for MSI-X, total_shift bits when everything lands on one vector).
 *
 * Returns 0 on success, or a negative errno if even INTx setup fails.
 */
int ndev_init_isr(struct intel_ntb_dev *ndev,
		  int msix_min, int msix_max,
		  int msix_shift, int total_shift)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev->ntb.pdev;

	/* Allocate per-vector bookkeeping on the device's NUMA node. */
	node = dev_to_node(&pdev->dev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	/* Try to set up msix irq */

	ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
	ndev->db_vec_count = msix_count;
	ndev->db_vec_shift = msix_shift;
	return 0;

err_msix_request:
	/* Unwind only the irqs that were successfully requested. */
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */

	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(&pdev->dev, "Using msi interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */

	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(&pdev->dev, "Using intx interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_intx_request:
	return rc;
}
464
/*
 * Tear down the interrupts set up by ndev_init_isr().  Doorbells are
 * re-masked first so no new interrupts arrive during teardown.  A
 * non-NULL ndev->msix distinguishes the MSI-X case from MSI/INTx.
 */
static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int i;

	pdev = ndev->ntb.pdev;

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	if (ndev->msix) {
		/* Free every vector that was requested, then the tables. */
		i = ndev->db_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		/* MSI and INTx share the same irq; only MSI needs disabling. */
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
	}
}
491
/*
 * debugfs "info" read handler for gen1 (Xeon) hardware.  Formats a
 * snapshot of device state — topology, link status, window/doorbell
 * configuration, translation and limit registers, and (for gen1 B2B)
 * the outgoing B2B registers and error counters — into a bounded
 * buffer and copies it to userspace.
 *
 * The output is capped at 0x800 bytes; scnprintf() silently truncates
 * beyond that rather than overflowing.
 */
static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
				     size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	struct pci_dev *pdev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	/* Scratch cell reused for every register width read below. */
	union { u64 v64; u32 v32; u16 v16; u8 v8; } u;

	ndev = filp->private_data;
	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	/* b2b_idx == UINT_MAX means no window is dedicated to B2B access. */
	if (ndev->b2b_idx != UINT_MAX) {
		off += scnprintf(buf + off, buf_size - off,
				 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
		off += scnprintf(buf + off, buf_size - off,
				 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "BAR4 Split -\t\t%s\n",
			 ndev->bar4_split ? "yes" : "no");

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	/* Live hardware values, alongside the cached mask printed above. */
	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Window Size:\n");

	/* BAR size exponents live in PCI config space, not in MMIO. */
	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
	off += scnprintf(buf + off, buf_size - off,
			 "PBAR23SZ %hhu\n", u.v8);
	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR45SZ %hhu\n", u.v8);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR4SZ %hhu\n", u.v8);
		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR5SZ %hhu\n", u.v8);
	}

	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
	off += scnprintf(buf + off, buf_size - off,
			 "SBAR23SZ %hhu\n", u.v8);
	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR45SZ %hhu\n", u.v8);
	} else {
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR4SZ %hhu\n", u.v8);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR5SZ %hhu\n", u.v8);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	/* Split-BAR mode exposes BAR4/5 as two 32-bit windows. */
	if (ndev->bar4_split) {
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT4 -\t\t\t%#06x\n", u.v32);

		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT5 -\t\t\t%#06x\n", u.v32);
	} else {
		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT45 -\t\t%#018llx\n", u.v64);
	}

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	if (ndev->bar4_split) {
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT4 -\t\t\t%#06x\n", u.v32);
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT5 -\t\t\t%#06x\n", u.v32);
	} else {
		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT45 -\t\t\t%#018llx\n", u.v64);
	}

	/* The remaining registers exist only on gen1 (Xeon) parts. */
	if (pdev_is_gen1(pdev)) {
		if (ntb_topo_is_b2b(ndev->ntb.topo)) {
			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Outgoing B2B XLAT:\n");

			u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT4 -\t\t%#06x\n",
						 u.v32);
				u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT5 -\t\t%#06x\n",
						 u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT45 -\t\t%#018llx\n",
						 u.v64);
			}

			u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT4 -\t\t%#06x\n",
						 u.v32);
				u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT5 -\t\t%#06x\n",
						 u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT45 -\t\t%#018llx\n",
						 u.v64);
			}

			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Secondary BAR:\n");

			u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR01 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR4 -\t\t\t%#06x\n", u.v32);
				u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR5 -\t\t\t%#06x\n", u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR45 -\t\t%#018llx\n",
						 u.v64);
			}
		}

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Statistics:\n");

		u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "Upstream Memory Miss -\t%u\n", u.v16);

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Hardware Errors:\n");

		/* Error registers are printed only if config reads succeed. */
		if (!pci_read_config_word(pdev,
					  XEON_DEVSTS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "DEVSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_word(pdev,
					  XEON_LINK_STATUS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "LNKSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_dword(pdev,
					   XEON_UNCERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "UNCERRSTS -\t\t%#06x\n", u.v32);

		if (!pci_read_config_dword(pdev,
					   XEON_CORERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "CORERRSTS -\t\t%#06x\n", u.v32);
	}

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}
757
Dave Jiang783dfa62016-11-16 14:03:38 -0700758static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
759 size_t count, loff_t *offp)
760{
761 struct intel_ntb_dev *ndev = filp->private_data;
762
Dave Jiang6c1e8ab2018-01-29 13:22:30 -0700763 if (pdev_is_gen1(ndev->ntb.pdev))
Dave Jiang783dfa62016-11-16 14:03:38 -0700764 return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
Dave Jiang6c1e8ab2018-01-29 13:22:30 -0700765 else if (pdev_is_gen3(ndev->ntb.pdev))
Dave Jiang783dfa62016-11-16 14:03:38 -0700766 return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
767
768 return -ENXIO;
769}
770
Allen Hubbee26a5842015-04-09 10:33:20 -0400771static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
772{
773 if (!debugfs_dir) {
774 ndev->debugfs_dir = NULL;
775 ndev->debugfs_info = NULL;
776 } else {
777 ndev->debugfs_dir =
Logan Gunthorpe48ea0212017-01-10 17:33:37 -0700778 debugfs_create_dir(pci_name(ndev->ntb.pdev),
779 debugfs_dir);
Allen Hubbee26a5842015-04-09 10:33:20 -0400780 if (!ndev->debugfs_dir)
781 ndev->debugfs_info = NULL;
782 else
783 ndev->debugfs_info =
784 debugfs_create_file("info", S_IRUSR,
785 ndev->debugfs_dir, ndev,
786 &intel_ntb_debugfs_info);
787 }
788}
789
/* Remove this device's debugfs hierarchy; debugfs_remove_recursive()
 * is safe to call with a NULL dentry, so no guard is needed.
 */
static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}
794
Dave Jiangf6e51c32018-01-29 13:22:24 -0700795int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
Allen Hubbee26a5842015-04-09 10:33:20 -0400796{
Serge Semin443b9a12017-01-11 03:11:33 +0300797 if (pidx != NTB_DEF_PEER_IDX)
798 return -EINVAL;
799
Allen Hubbee26a5842015-04-09 10:33:20 -0400800 return ntb_ndev(ntb)->mw_count;
801}
802
Dave Jiangf6e51c32018-01-29 13:22:24 -0700803int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
804 resource_size_t *addr_align,
805 resource_size_t *size_align,
806 resource_size_t *size_max)
Allen Hubbee26a5842015-04-09 10:33:20 -0400807{
808 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
Serge Semin443b9a12017-01-11 03:11:33 +0300809 resource_size_t bar_size, mw_size;
Allen Hubbee26a5842015-04-09 10:33:20 -0400810 int bar;
811
Serge Semin443b9a12017-01-11 03:11:33 +0300812 if (pidx != NTB_DEF_PEER_IDX)
813 return -EINVAL;
814
Allen Hubbee26a5842015-04-09 10:33:20 -0400815 if (idx >= ndev->b2b_idx && !ndev->b2b_off)
816 idx += 1;
817
818 bar = ndev_mw_to_bar(ndev, idx);
819 if (bar < 0)
820 return bar;
821
Serge Semin443b9a12017-01-11 03:11:33 +0300822 bar_size = pci_resource_len(ndev->ntb.pdev, bar);
Allen Hubbee26a5842015-04-09 10:33:20 -0400823
Serge Semin443b9a12017-01-11 03:11:33 +0300824 if (idx == ndev->b2b_idx)
825 mw_size = bar_size - ndev->b2b_off;
826 else
827 mw_size = bar_size;
Allen Hubbee26a5842015-04-09 10:33:20 -0400828
Serge Semin443b9a12017-01-11 03:11:33 +0300829 if (addr_align)
830 *addr_align = pci_resource_len(ndev->ntb.pdev, bar);
Allen Hubbee26a5842015-04-09 10:33:20 -0400831
Serge Semin443b9a12017-01-11 03:11:33 +0300832 if (size_align)
833 *size_align = 1;
834
835 if (size_max)
836 *size_max = mw_size;
Allen Hubbee26a5842015-04-09 10:33:20 -0400837
838 return 0;
839}
840
/* Program the inbound translation for memory window @idx so that peer
 * accesses through the window land at local DMA address @addr for
 * @size bytes.  Each register write is read back and verified; on
 * mismatch the registers are restored and -EIO is returned.
 *
 * Windows on bars >= 4 of a split-bar device use 32-bit registers, so
 * the address range must fit in 32 bits there.
 */
static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				  dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long base_reg, xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	/* Only the single default peer is supported. */
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	/* Skip the bar consumed whole by the b2b registers, if any. */
	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	/* Half of a shared b2b bar is reserved for peer registers. */
	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

	if (bar < 4 || !ndev->bar4_split) {
		/* 64-bit translation registers. */
		base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite64(addr, mmio + xlat_reg);
		reg_val = ioread64(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite64(limit, mmio + limit_reg);
		reg_val = ioread64(mmio + limit_reg);
		if (reg_val != limit) {
			/* Roll back both registers on failure. */
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}
	} else {
		/* split bar addr range must all be 32 bit */
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite32(addr, mmio + xlat_reg);
		reg_val = ioread32(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite32(limit, mmio + limit_reg);
		reg_val = ioread32(mmio + limit_reg);
		if (reg_val != limit) {
			/* Roll back both registers on failure. */
			iowrite32(base, mmio + limit_reg);
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}
941
Dave Jiangf6e51c32018-01-29 13:22:24 -0700942u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed,
943 enum ntb_width *width)
Allen Hubbee26a5842015-04-09 10:33:20 -0400944{
945 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
946
947 if (ndev->reg->link_is_up(ndev)) {
948 if (speed)
949 *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
950 if (width)
951 *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
952 return 1;
953 } else {
954 /* TODO MAYBE: is it possible to observe the link speed and
955 * width while link is training? */
956 if (speed)
957 *speed = NTB_SPEED_NONE;
958 if (width)
959 *width = NTB_WIDTH_NONE;
960 return 0;
961 }
962}
963
964static int intel_ntb_link_enable(struct ntb_dev *ntb,
965 enum ntb_speed max_speed,
966 enum ntb_width max_width)
967{
968 struct intel_ntb_dev *ndev;
969 u32 ntb_ctl;
970
971 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
972
973 if (ndev->ntb.topo == NTB_TOPO_SEC)
974 return -EINVAL;
975
Logan Gunthorpe48ea0212017-01-10 17:33:37 -0700976 dev_dbg(&ntb->pdev->dev,
Allen Hubbee26a5842015-04-09 10:33:20 -0400977 "Enabling link with max_speed %d max_width %d\n",
978 max_speed, max_width);
979 if (max_speed != NTB_SPEED_AUTO)
Logan Gunthorpe48ea0212017-01-10 17:33:37 -0700980 dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
Allen Hubbee26a5842015-04-09 10:33:20 -0400981 if (max_width != NTB_WIDTH_AUTO)
Logan Gunthorpe48ea0212017-01-10 17:33:37 -0700982 dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
Allen Hubbee26a5842015-04-09 10:33:20 -0400983
984 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
985 ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
986 ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
987 ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
988 if (ndev->bar4_split)
989 ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
990 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
991
992 return 0;
993}
994
Dave Jiangf6e51c32018-01-29 13:22:24 -0700995int intel_ntb_link_disable(struct ntb_dev *ntb)
Allen Hubbee26a5842015-04-09 10:33:20 -0400996{
997 struct intel_ntb_dev *ndev;
998 u32 ntb_cntl;
999
1000 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1001
1002 if (ndev->ntb.topo == NTB_TOPO_SEC)
1003 return -EINVAL;
1004
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001005 dev_dbg(&ntb->pdev->dev, "Disabling link\n");
Allen Hubbee26a5842015-04-09 10:33:20 -04001006
1007 /* Bring NTB link down */
1008 ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1009 ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
1010 ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
1011 if (ndev->bar4_split)
1012 ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
1013 ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
1014 iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
1015
1016 return 0;
1017}
1018
Dave Jiangf6e51c32018-01-29 13:22:24 -07001019int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
Serge Semin443b9a12017-01-11 03:11:33 +03001020{
1021 /* Numbers of inbound and outbound memory windows match */
1022 return ntb_ndev(ntb)->mw_count;
1023}
1024
Dave Jiangf6e51c32018-01-29 13:22:24 -07001025int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
1026 phys_addr_t *base, resource_size_t *size)
Serge Semin443b9a12017-01-11 03:11:33 +03001027{
1028 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1029 int bar;
1030
1031 if (idx >= ndev->b2b_idx && !ndev->b2b_off)
1032 idx += 1;
1033
1034 bar = ndev_mw_to_bar(ndev, idx);
1035 if (bar < 0)
1036 return bar;
1037
1038 if (base)
1039 *base = pci_resource_start(ndev->ntb.pdev, bar) +
1040 (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
1041
1042 if (size)
1043 *size = pci_resource_len(ndev->ntb.pdev, bar) -
1044 (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
1045
1046 return 0;
1047}
1048
Allen Hubbee26a5842015-04-09 10:33:20 -04001049static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
1050{
1051 return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
1052}
1053
Dave Jiangf6e51c32018-01-29 13:22:24 -07001054u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
Allen Hubbee26a5842015-04-09 10:33:20 -04001055{
1056 return ntb_ndev(ntb)->db_valid_mask;
1057}
1058
Dave Jiangf6e51c32018-01-29 13:22:24 -07001059int intel_ntb_db_vector_count(struct ntb_dev *ntb)
Allen Hubbee26a5842015-04-09 10:33:20 -04001060{
1061 struct intel_ntb_dev *ndev;
1062
1063 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1064
1065 return ndev->db_vec_count;
1066}
1067
Dave Jiangf6e51c32018-01-29 13:22:24 -07001068u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
Allen Hubbee26a5842015-04-09 10:33:20 -04001069{
1070 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1071
1072 if (db_vector < 0 || db_vector > ndev->db_vec_count)
1073 return 0;
1074
1075 return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
1076}
1077
1078static u64 intel_ntb_db_read(struct ntb_dev *ntb)
1079{
1080 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1081
1082 return ndev_db_read(ndev,
1083 ndev->self_mmio +
1084 ndev->self_reg->db_bell);
1085}
1086
1087static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
1088{
1089 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1090
1091 return ndev_db_write(ndev, db_bits,
1092 ndev->self_mmio +
1093 ndev->self_reg->db_bell);
1094}
1095
Dave Jiangf6e51c32018-01-29 13:22:24 -07001096int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
Allen Hubbee26a5842015-04-09 10:33:20 -04001097{
1098 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1099
1100 return ndev_db_set_mask(ndev, db_bits,
1101 ndev->self_mmio +
1102 ndev->self_reg->db_mask);
1103}
1104
Dave Jiangf6e51c32018-01-29 13:22:24 -07001105int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
Allen Hubbee26a5842015-04-09 10:33:20 -04001106{
1107 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1108
1109 return ndev_db_clear_mask(ndev, db_bits,
1110 ndev->self_mmio +
1111 ndev->self_reg->db_mask);
1112}
1113
/* Report the physical address (and register width) of the peer
 * doorbell register, for clients that ring doorbells via DMA writes.
 */
int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
			   resource_size_t *db_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
			    ndev->peer_reg->db_bell);
}
1122
1123static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1124{
1125 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1126
1127 return ndev_db_write(ndev, db_bits,
1128 ndev->peer_mmio +
1129 ndev->peer_reg->db_bell);
1130}
1131
Dave Jiangf6e51c32018-01-29 13:22:24 -07001132int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
Allen Hubbee26a5842015-04-09 10:33:20 -04001133{
1134 return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
1135}
1136
Dave Jiangf6e51c32018-01-29 13:22:24 -07001137int intel_ntb_spad_count(struct ntb_dev *ntb)
Allen Hubbee26a5842015-04-09 10:33:20 -04001138{
1139 struct intel_ntb_dev *ndev;
1140
1141 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1142
1143 return ndev->spad_count;
1144}
1145
Dave Jiangf6e51c32018-01-29 13:22:24 -07001146u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
Allen Hubbee26a5842015-04-09 10:33:20 -04001147{
1148 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1149
1150 return ndev_spad_read(ndev, idx,
1151 ndev->self_mmio +
1152 ndev->self_reg->spad);
1153}
1154
Dave Jiangf6e51c32018-01-29 13:22:24 -07001155int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
Allen Hubbee26a5842015-04-09 10:33:20 -04001156{
1157 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1158
1159 return ndev_spad_write(ndev, idx, val,
1160 ndev->self_mmio +
1161 ndev->self_reg->spad);
1162}
1163
/* Report the physical address of peer scratchpad @sidx.  @pidx is not
 * checked here - presumably because only the single default peer
 * exists on this hardware; TODO confirm against the ntb core callers.
 */
int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
			     phys_addr_t *spad_addr)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
			      ndev->peer_reg->spad);
}
1172
Dave Jiangf6e51c32018-01-29 13:22:24 -07001173u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
Allen Hubbee26a5842015-04-09 10:33:20 -04001174{
1175 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1176
Serge Semind67288a2017-01-11 03:13:20 +03001177 return ndev_spad_read(ndev, sidx,
Allen Hubbee26a5842015-04-09 10:33:20 -04001178 ndev->peer_mmio +
1179 ndev->peer_reg->spad);
1180}
1181
Dave Jiangf6e51c32018-01-29 13:22:24 -07001182int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
1183 u32 val)
Allen Hubbee26a5842015-04-09 10:33:20 -04001184{
1185 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1186
Serge Semind67288a2017-01-11 03:13:20 +03001187 return ndev_spad_write(ndev, sidx, val,
Allen Hubbee26a5842015-04-09 10:33:20 -04001188 ndev->peer_mmio +
1189 ndev->peer_reg->spad);
1190}
1191
Dave Jiang2f887b92015-05-20 12:55:47 -04001192static u64 xeon_db_ioread(void __iomem *mmio)
Allen Hubbee26a5842015-04-09 10:33:20 -04001193{
1194 return (u64)ioread16(mmio);
1195}
1196
Dave Jiang2f887b92015-05-20 12:55:47 -04001197static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
Allen Hubbee26a5842015-04-09 10:33:20 -04001198{
1199 iowrite16((u16)bits, mmio);
1200}
1201
/* Poll the PCIe link status register and update the cached value.
 * Returns 1 if the cached link status changed, 0 otherwise (including
 * when the config read fails).
 */
static int xeon_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	/* Write the link doorbell bit before sampling - NOTE(review):
	 * presumably this acks the pending link event so a change after
	 * the read below re-raises it; confirm against the Xeon NTB spec.
	 */
	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_bell);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  XEON_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}
1223
Dave Jiangf6e51c32018-01-29 13:22:24 -07001224int xeon_link_is_up(struct intel_ntb_dev *ndev)
Allen Hubbee26a5842015-04-09 10:33:20 -04001225{
Dave Jiang5ae0beb2015-05-19 16:59:34 -04001226 if (ndev->ntb.topo == NTB_TOPO_SEC)
1227 return 1;
1228
Allen Hubbee26a5842015-04-09 10:33:20 -04001229 return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
1230}
1231
/* Decode the topology field of the Xeon PPD register into the generic
 * ntb_topo enumeration.  The USD/DSD distinction is only meaningful
 * for b2b; for PRI/SEC the bogus *_DSD encodings are accepted as-is.
 */
enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
	switch (ppd & XEON_PPD_TOPO_MASK) {
	case XEON_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;

	case XEON_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;

	case XEON_PPD_TOPO_PRI_USD:
	case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
		return NTB_TOPO_PRI;

	case XEON_PPD_TOPO_SEC_USD:
	case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		return NTB_TOPO_SEC;
	}

	/* Unrecognized encoding. */
	return NTB_TOPO_NONE;
}
1252
Dave Jiang2f887b92015-05-20 12:55:47 -04001253static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
Allen Hubbee26a5842015-04-09 10:33:20 -04001254{
Dave Jiang2f887b92015-05-20 12:55:47 -04001255 if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
Logan Gunthorpe48ea0212017-01-10 17:33:37 -07001256 dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
Allen Hubbee26a5842015-04-09 10:33:20 -04001257 return 1;
1258 }
1259 return 0;
1260}
1261
/* Set up interrupt handling using the Xeon doorbell/vector layout
 * constants.
 */
static int xeon_init_isr(struct intel_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_SHIFT,
			     XEON_DB_TOTAL_SHIFT);
}
1269
/* Tear down interrupt handling (thin wrapper for the reg-ops table). */
static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
{
	ndev_deinit_isr(ndev);
}
1274
/* Configure the back-to-back memory window and all secondary/primary
 * bar registers for a Xeon b2b topology.
 *
 * @addr:      local bar addresses to program into the secondary bars.
 * @peer_addr: peer bar addresses for the outgoing translations.
 *
 * A bar (or half of one, when b2b_mw_share allows) is stolen for peer
 * NTB register access; the rest of the sequence sizes the secondary
 * bars, zeroes the incoming translations, programs the outgoing
 * translations, and finally maps the peer register space.  The order
 * of the register writes follows the hardware bring-up sequence and
 * must not be changed casually.
 */
static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx == UINT_MAX) {
		/* No memory window is sacrificed for b2b registers. */
		dev_dbg(&pdev->dev, "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
			/* Half the bar for registers, half stays a mw. */
			dev_dbg(&pdev->dev, "b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (XEON_B2B_MIN_SIZE <= bar_size) {
			/* Whole bar consumed; one fewer mw available. */
			dev_dbg(&pdev->dev, "b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(&pdev->dev, "b2b bar size is too small\n");
			return -EIO;
		}
	}

	/* Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.
	 *
	 * Note: code for each specific bar size register, because the register
	 * offsets are not in a consistent order (bar5sz comes after ppd, odd).
	 */
	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		/* Bar sizes are log2; -1 halves, 0 disables. */
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}
	pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);

	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);

		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
		if (b2b_bar == 5) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
	}

	/* SBAR01 hit by first part of the b2b bar */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = addr->bar5_addr32;
	else
		return -EIO;

	dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
	iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);

	/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
	 * The b2b bar is either disabled above, or configured half-size, and
	 * it starts at the PBAR xlat + offset.
	 */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
	dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
	}

	/* setup incoming bar limits == base addrs (zero length windows) */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
	dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
	}

	/* zero incoming translation addrs */
	iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
	}

	/* zero outgoing translation limits (whole bar size windows) */
	iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
	}

	/* set outgoing translation offsets */
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
	dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
	}

	/* set the translation offset for b2b registers */
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

	/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
	dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
		/* map peer ntb mmio config space registers */
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    XEON_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;

		ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
	}

	return 0;
}
1520
/*
 * xeon_init_ntb() - apply topology-specific setup for a gen1 Xeon NTB port
 * @ndev:	device being initialized; ndev->ntb.topo and ndev->hwerr_flags
 *		must already be set (see xeon_init_dev()).
 *
 * Chooses memory-window, scratchpad and doorbell counts, points the
 * self/peer/xlat register descriptors at the tables matching the detected
 * topology, and for back-to-back (b2b) links configures the b2b memory
 * window.  Finishes by masking every doorbell bit.
 *
 * Return: 0 on success, -EINVAL for unusable configurations, or the error
 * from xeon_setup_b2b_mw().
 */
static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{
	struct device *dev = &ndev->ntb.pdev->dev;
	int rc;
	u32 ntb_ctl;

	/* Split-BAR parts expose a different number of memory windows. */
	if (ndev->bar4_split)
		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
	else
		ndev->mw_count = XEON_MW_COUNT;

	ndev->spad_count = XEON_SPAD_COUNT;
	ndev->db_count = XEON_DB_COUNT;
	ndev->db_link_mask = XEON_DB_LINK_BIT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
		/*
		 * The SDOORBELL lockup errata is only worked around in the
		 * b2b topology (via a memory window, see below), so a plain
		 * primary config must be refused on affected parts.
		 */
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(dev, "NTB Primary config disabled\n");
			return -EINVAL;
		}

		/* enable link to allow secondary side device to appear */
		ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
		ntb_ctl &= ~NTB_CTL_DISABLE;
		iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_sec_reg;
		ndev->xlat_reg = &xeon_sec_xlat;
		break;

	case NTB_TOPO_SEC:
		/* Same errata restriction as the primary config above. */
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(dev, "NTB Secondary config disabled\n");
			return -EINVAL;
		}
		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_sec_reg;
		ndev->peer_reg = &xeon_pri_reg;
		ndev->xlat_reg = &xeon_pri_xlat;
		break;

	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_b2b_reg;
		ndev->xlat_reg = &xeon_sec_xlat;

		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			/*
			 * Errata workaround: reach the peer's primary-side
			 * registers through a dedicated memory window instead
			 * of touching SDOORBELL/B2BDOORBELL directly.
			 */
			ndev->peer_reg = &xeon_pri_reg;

			/* A negative module-param index counts back from the
			 * last memory window.
			 */
			if (b2b_mw_idx < 0)
				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
			else
				ndev->b2b_idx = b2b_mw_idx;

			if (ndev->b2b_idx >= ndev->mw_count) {
				dev_dbg(dev,
					"b2b_mw_idx %d invalid for mw_count %u\n",
					b2b_mw_idx, ndev->mw_count);
				return -EINVAL;
			}

			dev_dbg(dev, "setting up b2b mw idx %d means %d\n",
				b2b_mw_idx, ndev->b2b_idx);

		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
			/* Last doorbell bit is not mirrored; drop it. */
			dev_warn(dev, "Reduce doorbell count by 1\n");
			ndev->db_count -= 1;
		}

		/* USD programs the DSD addresses into its peer, and vice
		 * versa.
		 */
		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_dsd_addr,
					       &xeon_b2b_usd_addr);
		} else {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_usd_addr,
					       &xeon_b2b_dsd_addr);
		}
		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + XEON_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	/* Start with every doorbell interrupt masked. */
	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}
1626
/*
 * xeon_init_dev() - one-time hardware init for a gen1 Xeon NTB function
 * @ndev:	device to initialize (ndev->ntb.pdev must be valid)
 *
 * Records per-stepping hardware errata flags keyed off the PCI device ID,
 * selects the gen1 register map, reads the PPD config byte to determine
 * the NTB topology and split-BAR mode, then runs topology setup
 * (xeon_init_ntb()) and interrupt setup (xeon_init_isr()).
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int xeon_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc, mem;

	pdev = ndev->ntb.pdev;

	switch (pdev->device) {
	/* There is a Xeon hardware errata related to writes to SDOORBELL or
	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
	 * which may hang the system.  To workaround this use the second memory
	 * window to access the interrupt and scratch pad registers on the
	 * remote system.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* There is a hardware errata related to accessing any register in
	 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
		break;
	}

	switch (pdev->device) {
	/* HW Errata on bit 14 of b2bdoorbell register.  Writes will not be
	 * mirrored to the remote system.  Shrink the number of bits by one,
	 * since bit 14 is the last bit.
	 */
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
		break;
	}

	ndev->reg = &xeon_reg;

	/* The PPD config byte encodes both topology and split-BAR mode. */
	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
		dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
			ppd, ndev->bar4_split);
	} else {
		/* This is a way for transparent BAR to figure out if we are
		 * doing split BAR or not.  There is no way for the hw on the
		 * transparent side to know and set the PPD.
		 */
		mem = pci_select_bars(pdev, IORESOURCE_MEM);
		ndev->bar4_split = hweight32(mem) ==
			HSX_SPLIT_BAR_MW_COUNT + 1;
		dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
			mem, ndev->bar4_split);
	}

	rc = xeon_init_ntb(ndev);
	if (rc)
		return rc;

	return xeon_init_isr(ndev);
}
1736
/* Undo xeon_init_dev(): only the ISR setup currently needs teardown. */
static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
{
	xeon_deinit_isr(ndev);
}
1741
/*
 * intel_ntb_init_pci() - enable and map the PCI resources of an NTB function
 * @ndev:	driver context to fill in
 * @pdev:	PCI function backing the NTB
 *
 * Enables the device, claims its BARs, selects a 64-bit DMA mask with a
 * 32-bit fallback (for both streaming and consistent mappings), mirrors the
 * resulting mask onto the ntb device, and iomaps BAR0 for register access.
 *
 * Return: 0 on success, negative errno on failure; all partial setup is
 * unwound through the goto chain.
 */
static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	/* Prefer 64-bit DMA; fall back to 32-bit with a warning. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
	}
	/* Propagate whatever mask was negotiated to the ntb device itself. */
	rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
					  dma_get_mask(&pdev->dev));
	if (rc)
		goto err_dma_mask;

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_mmio;
	}
	/* Peer registers alias the local BAR0 mapping until a b2b window is
	 * set up (see xeon_setup_b2b_mw()).
	 */
	ndev->peer_mmio = ndev->self_mmio;
	ndev->peer_addr = pci_resource_start(pdev, 0);

	return 0;

err_mmio:
err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}
1798
/*
 * intel_ntb_deinit_pci() - undo intel_ntb_init_pci()
 *
 * The peer window is unmapped only when it is a distinct mapping (the b2b
 * case); otherwise it aliases self_mmio and a single iounmap suffices.
 */
static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;

	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
		pci_iounmap(pdev, ndev->peer_mmio);
	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
1812
1813static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
1814 struct pci_dev *pdev)
1815{
1816 ndev->ntb.pdev = pdev;
1817 ndev->ntb.topo = NTB_TOPO_NONE;
1818 ndev->ntb.ops = &intel_ntb_ops;
1819
1820 ndev->b2b_off = 0;
Allen Hubbe2aa2a772015-08-31 09:30:59 -04001821 ndev->b2b_idx = UINT_MAX;
Allen Hubbee26a5842015-04-09 10:33:20 -04001822
1823 ndev->bar4_split = 0;
1824
1825 ndev->mw_count = 0;
1826 ndev->spad_count = 0;
1827 ndev->db_count = 0;
1828 ndev->db_vec_count = 0;
1829 ndev->db_vec_shift = 0;
1830
1831 ndev->ntb_ctl = 0;
1832 ndev->lnk_sta = 0;
1833
1834 ndev->db_valid_mask = 0;
1835 ndev->db_link_mask = 0;
1836 ndev->db_mask = 0;
1837
1838 spin_lock_init(&ndev->db_mask_lock);
1839}
1840
/*
 * intel_ntb_pci_probe() - bind the driver to a supported NTB function
 * @pdev:	matched PCI device
 * @id:		matching table entry (unused beyond the match itself)
 *
 * Allocates the device context on the PCI device's NUMA node, runs the
 * generation-specific hardware init (gen1 Xeon or gen3), samples the
 * initial link state, creates debugfs entries and registers with the NTB
 * core.  Errors unwind in reverse order through the goto chain.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int intel_ntb_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct intel_ntb_dev *ndev;
	int rc, node;

	/* Keep the context local to the device's NUMA node. */
	node = dev_to_node(&pdev->dev);

	if (pdev_is_gen1(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = xeon_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_gen3(pdev)) {
		ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);
		/* gen3 shares the context but uses its own ops table. */
		ndev->ntb.ops = &intel_ntb3_ops;

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = gen3_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else {
		rc = -EINVAL;
		goto err_ndev;
	}

	ndev_reset_unsafe_flags(ndev);

	/* Sample link state once before exposing the device. */
	ndev->reg->poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
		xeon_deinit_dev(ndev);
err_init_dev:
	intel_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}
1914
/*
 * intel_ntb_pci_remove() - unbind from the device
 *
 * Reverses probe: unregister from the NTB core first, then remove debugfs
 * entries, tear down generation-specific state, release PCI resources and
 * free the context.
 */
static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
		xeon_deinit_dev(ndev);
	intel_ntb_deinit_pci(ndev);
	kfree(ndev);
}
1926
/* Register ops and layout shared by all gen1 Xeon topologies
 * (installed as ndev->reg by xeon_init_dev()).
 */
static const struct intel_ntb_reg xeon_reg = {
	.poll_link = xeon_poll_link,
	.link_is_up = xeon_link_is_up,
	.db_ioread = xeon_db_ioread,
	.db_iowrite = xeon_db_iowrite,
	.db_size = sizeof(u32),
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.mw_bar = {2, 4, 5},
};
1936
/* Primary-side doorbell/scratchpad registers: the self view on PRI and b2b
 * topologies, and the peer view when this side is the secondary.
 */
static const struct intel_ntb_alt_reg xeon_pri_reg = {
	.db_bell = XEON_PDOORBELL_OFFSET,
	.db_mask = XEON_PDBMSK_OFFSET,
	.spad = XEON_SPAD_OFFSET,
};
1942
/* Secondary-side doorbell/scratchpad registers: the self view when this
 * side is the secondary, the peer view for the primary.
 */
static const struct intel_ntb_alt_reg xeon_sec_reg = {
	.db_bell = XEON_SDOORBELL_OFFSET,
	.db_mask = XEON_SDBMSK_OFFSET,
	/* second half of the scratchpads */
	/* NOTE(review): "<< 1" presumably skips COUNT/2 32-bit spads
	 * (COUNT/2 * 4 bytes == COUNT << 1), matching spad_count >>= 1 in
	 * xeon_init_ntb() — confirm against the register map.
	 */
	.spad = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
};
1949
/* Peer-access registers for the b2b topology (no mask register: b2b
 * doorbells are mirrored to the remote side).
 */
static const struct intel_ntb_alt_reg xeon_b2b_reg = {
	.db_bell = XEON_B2B_DOORBELL_OFFSET,
	.spad = XEON_B2B_SPAD_OFFSET,
};
1954
/* Primary-side translation registers, selected when this side is the
 * secondary (NTB_TOPO_SEC).
 */
static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
	/* Note: no primary .bar0_base visible to the secondary side.
	 *
	 * The secondary side cannot get the base address stored in primary
	 * bars. The base address is necessary to set the limit register to
	 * any value other than zero, or unlimited.
	 *
	 * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
	 * window by setting the limit equal to base, nor can it limit the size
	 * of the memory window by setting the limit to base + size.
	 */
	.bar2_limit = XEON_PBAR23LMT_OFFSET,
	.bar2_xlat = XEON_PBAR23XLAT_OFFSET,
};
1969
/* Secondary-side translation registers, selected for the primary and both
 * b2b topologies.
 */
static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base = XEON_SBAR0BASE_OFFSET,
	.bar2_limit = XEON_SBAR23LMT_OFFSET,
	.bar2_xlat = XEON_SBAR23XLAT_OFFSET,
};
1975
/* Default b2b BAR translation addresses for the upstream (USD) device.
 * Deliberately non-static — presumably shared with other generation code;
 * confirm before changing linkage.
 */
struct intel_b2b_addr xeon_b2b_usd_addr = {
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};
1982
/* Default b2b BAR translation addresses for the downstream (DSD) device.
 * Deliberately non-static — presumably shared with other generation code;
 * confirm before changing linkage.
 */
struct intel_b2b_addr xeon_b2b_dsd_addr = {
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};
1989
/* operations for primary side of local ntb */
/* Gen1 Xeon implementation of the NTB core API (installed as the default
 * ops table by ndev_init_struct(); gen3 devices override it in probe).
 */
static const struct ntb_dev_ops intel_ntb_ops = {
	.mw_count = intel_ntb_mw_count,
	.mw_get_align = intel_ntb_mw_get_align,
	.mw_set_trans = intel_ntb_mw_set_trans,
	.peer_mw_count = intel_ntb_peer_mw_count,
	.peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
	.link_is_up = intel_ntb_link_is_up,
	.link_enable = intel_ntb_link_enable,
	.link_disable = intel_ntb_link_disable,
	.db_is_unsafe = intel_ntb_db_is_unsafe,
	.db_valid_mask = intel_ntb_db_valid_mask,
	.db_vector_count = intel_ntb_db_vector_count,
	.db_vector_mask = intel_ntb_db_vector_mask,
	.db_read = intel_ntb_db_read,
	.db_clear = intel_ntb_db_clear,
	.db_set_mask = intel_ntb_db_set_mask,
	.db_clear_mask = intel_ntb_db_clear_mask,
	.peer_db_addr = intel_ntb_peer_db_addr,
	.peer_db_set = intel_ntb_peer_db_set,
	.spad_is_unsafe = intel_ntb_spad_is_unsafe,
	.spad_count = intel_ntb_spad_count,
	.spad_read = intel_ntb_spad_read,
	.spad_write = intel_ntb_spad_write,
	.peer_spad_addr = intel_ntb_peer_spad_addr,
	.peer_spad_read = intel_ntb_peer_spad_read,
	.peer_spad_write = intel_ntb_peer_spad_write,
};
2018
/* Read-only debugfs file backed by ndev_debugfs_read(). */
static const struct file_operations intel_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};
2024
/* Supported devices: B2B/PS/SS variants of JSF, SNB, IVT, HSX and BDX
 * (gen1), plus the SKX B2B part (gen3).
 */
static const struct pci_device_id intel_ntb_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
Jon Masonfce8a7b2012-11-16 19:27:12 -07002045
/* PCI driver glue: the device table above plus probe/remove callbacks. */
static struct pci_driver intel_ntb_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = intel_ntb_pci_tbl,
	.probe = intel_ntb_pci_probe,
	.remove = intel_ntb_pci_remove,
};
2052
/*
 * Module load: create the debugfs root (only if debugfs is available) and
 * register the PCI driver.
 */
static int __init intel_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);
Jon Mason1517a3f2013-07-30 15:58:49 -07002063
/*
 * Module unload: unregister the PCI driver (which detaches any bound
 * devices) before removing the debugfs hierarchy they may have used.
 */
static void __exit intel_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&intel_ntb_pci_driver);

	debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);