/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
        u64 flag; /* the flag */
        char *str; /* description string */
        u16 extra; /* extra information */
        u16 unused0;
        u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
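/*
 * For example, FLAG_ENTRY("PioWriteBadCtxt", SEC_WRITE_DROPPED, <smask>)
 * expands to { <smask>, "PioWriteBadCtxt", SEC_WRITE_DROPPED }, i.e.
 * .flag = <smask>, .str = "PioWriteBadCtxt", .extra = SEC_WRITE_DROPPED,
 * so the error handlers can map a set status bit to a printable name and,
 * for send errors, the consequence encoded in 'extra'.
 */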

/* Send Error Consequences */
#define SEC_WRITE_DROPPED 0x1
#define SEC_PACKET_DROPPED 0x2
#define SEC_SC_HALTED 0x4 /* per-context only */
#define SEC_SPC_FREEZE 0x8 /* per-HFI only */

#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1

/*
 * RSM instance allocation
 * 0 - Verbs
 * 1 - User Fecn Handling
 * 2 - Vnic
 */
#define RSM_INS_VERBS 0
#define RSM_INS_FECN 1
#define RSM_INS_VNIC 2

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN[7..1] */
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))

/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
        num, \
        sc0, sc0val, \
        sc1, sc1val, \
        sc2, sc2val, \
        sc3, sc3val, \
        sc4, sc4val, \
        sc5, sc5val, \
        sc6, sc6val, \
        sc7, sc7val) \
( \
        ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
        ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
        ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
        ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
        ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
        ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
        ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
        ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
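/*
 * Illustrative use only (the mapping below is made up, not the power-on
 * table): e.g.
 *   SC2VL_VAL(0,  0, 0,  1, 0,  2, 1,  3, 1,
 *                 4, 2,  5, 2,  6, 3,  7, 3)
 * ORs each VL value into that SC's field of the SEND_SC2VLT0 CSR,
 * producing one 64-bit value that maps SC0/SC1 to VL0, SC2/SC3 to VL1,
 * SC4/SC5 to VL2 and SC6/SC7 to VL3.
 */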

#define DC_SC_VL_VAL( \
        range, \
        e0, e0val, \
        e1, e1val, \
        e2, e2val, \
        e3, e3val, \
        e4, e4val, \
        e5, e5val, \
        e6, e6val, \
        e7, e7val, \
        e8, e8val, \
        e9, e9val, \
        e10, e10val, \
        e11, e11val, \
        e12, e12val, \
        e13, e13val, \
        e14, e14val, \
        e15, e15val) \
( \
        ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
        ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
        ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
        ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
        ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
        ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
        ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
        ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
        ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
        ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
        ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
        ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
        ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
        ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
        ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
        ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
        | CCE_STATUS_RXE_FROZE_SMASK \
        | CCE_STATUS_TXE_FROZE_SMASK \
        | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
        | CCE_STATUS_TXE_PAUSED_SMASK \
        | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
        CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
        CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
        CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
        CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
        CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
        CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
        CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
        CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
        CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
        CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
        CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
        CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
        CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
        CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
        CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
        CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
        CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
        CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
        CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
        CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
        CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
        CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
        CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
        CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/ FLAG_ENTRY0("LATriggered",
        CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
        CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
        CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
        CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
        CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
        CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
        CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
        CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
        CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
        SEC_WRITE_DROPPED,
        SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY("PioCsrParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/ FLAG_ENTRY("PioPccFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY("PioPecFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY("PioSmPktResetParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
        0,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
        0,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/ FLAG_ENTRY("PioInitSmIn",
        0,
        SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
        0,
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/ FLAG_ENTRY("PioWriteDataParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/ FLAG_ENTRY("PioStateMachine",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/ FLAG_ENTRY("PioVlfSopParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/ FLAG_ENTRY("PioVlFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY("PioPpmcSopLen",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
        (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
        SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
        SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
        SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
        SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
        (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
        | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
        | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
        (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
        | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
        | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
        SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
        SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
        SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
        SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
        SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
        SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
        SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
        SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
        SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
        SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
        SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
        SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
        SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
        SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
        SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
        SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
        SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
        SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
        SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
        SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
        SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
        SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
        SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
        SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
        SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
        SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
        SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
        SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
        SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
        SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
        SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
        (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
        | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
        | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
        | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
        | SEES(TX_LAUNCH_CSR_PARITY) \
        | SEES(TX_SBRD_CTL_CSR_PARITY) \
        | SEES(TX_CONFIG_PARITY) \
        | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
        | SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("InconsistentSop",
        SEC_PACKET_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/ FLAG_ENTRY("DisallowedPacket",
        SEC_PACKET_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("WriteOverflow",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
        RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
        RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
        RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
        RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
        RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
        RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
        RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
        RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
        RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
        RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
        RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
        RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
        (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
        (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
        FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
        FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
        FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
        FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
        FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
        FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
        FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
        FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
        FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
        FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
        FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
        FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
        FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
        FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
        FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
        FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
        FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
        FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
        FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
        FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
        FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
        FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
        FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
        FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
        FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
        FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
        FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
        FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
        FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
        FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
        FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
        FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
        FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
        FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
        FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
        FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
        FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
        FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
        LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
        LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
        LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
        LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
        LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
        LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
        FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
        FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
        FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
        FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
        FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
        FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
        FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
        FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
        FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
                    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
        FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
        FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
        FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
        FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
        FLAG_ENTRY0("Serdes internal loopback failure",
                    FAILED_SERDES_INTERNAL_LOOPBACK),
        FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
        FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
        FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
        FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
        FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
        FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
        FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
        FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
        FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
        FLAG_ENTRY0("External Device Request Timeout",
                    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
        FLAG_ENTRY0("Host request done", 0x0001),
        FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
        FLAG_ENTRY0("BC SMA message", 0x0004),
        FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
        FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
        FLAG_ENTRY0("External device config request", 0x0020),
        FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
        FLAG_ENTRY0("LinkUp achieved", 0x0080),
        FLAG_ENTRY0("Link going down", 0x0100),
        FLAG_ENTRY0("Link width downgraded", 0x0200),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
                               u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
                                      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
                                     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
                                  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
                            u8 *tx_polarity_inversion,
                            u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
                                unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
                                          u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
                           u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
                                  int msecs);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
                                   int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
                           unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);

/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
        u32 status; /* status CSR offset */
        u32 clear; /* clear CSR offset */
        u32 mask; /* mask CSR offset */
        void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
        const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
        { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
          handler, desc }
#define DC_EE1(reg, handler, desc) \
        { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
        { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
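/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 *   { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" },
 * while DC_EE1/DC_EE2 pick up the _FLG/_FLG_CLR/_FLG_EN and _FLG/_CLR/_EN
 * register name patterns used by the DC sub-blocks.
 */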

/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/ { 0, 0, 0, NULL }, /* reserved */
/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
        /* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
        EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
        /* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
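/*
 * (Presumably the power-of-2 MTUs follow the usual OPA/IB MTU enum, e.g.
 * 256 -> 1, 512 -> 2, ..., 8192 -> 6, i.e. ilog2(mtu) - 7; only the
 * 10240-byte MTU needs the fixed value 7 above.)
 */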

/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
        /* the rest are reserved */
};

struct cntr_entry {
        /*
         * counter name
         */
        char *name;

        /*
         * csr to read for name (if applicable)
         */
        u64 csr;

        /*
         * offset into dd or ppd to store the counter's value
         */
        int offset;

        /*
         * flags
         */
        u8 flags;

        /*
         * accessor for stat element, context either dd or ppd
         */
        u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
                       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
        name, \
        csr, \
        offset, \
        flags, \
        accessor \
}
1210
1211/* 32bit RXE */
1212#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1213CNTR_ELEM(#name, \
1214 (counter * 8 + RCV_COUNTER_ARRAY32), \
1215 0, flags | CNTR_32BIT, \
1216 port_access_u32_csr)
1217
1218#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1219CNTR_ELEM(#name, \
1220 (counter * 8 + RCV_COUNTER_ARRAY32), \
1221 0, flags | CNTR_32BIT, \
1222 dev_access_u32_csr)
1223
1224/* 64bit RXE */
1225#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1226CNTR_ELEM(#name, \
1227 (counter * 8 + RCV_COUNTER_ARRAY64), \
1228 0, flags, \
1229 port_access_u64_csr)
1230
1231#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1232CNTR_ELEM(#name, \
1233 (counter * 8 + RCV_COUNTER_ARRAY64), \
1234 0, flags, \
1235 dev_access_u64_csr)
1236
1237#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1238#define OVR_ELM(ctx) \
1239CNTR_ELEM("RcvHdrOvr" #ctx, \
Jubin John8638b772016-02-14 20:19:24 -08001240 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
Mike Marciniszyn77241052015-07-30 15:17:43 -04001241 0, CNTR_NORMAL, port_access_u64_csr)
1242
1243/* 32bit TXE */
1244#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1245CNTR_ELEM(#name, \
1246 (counter * 8 + SEND_COUNTER_ARRAY32), \
1247 0, flags | CNTR_32BIT, \
1248 port_access_u32_csr)
1249
1250/* 64bit TXE */
1251#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1252CNTR_ELEM(#name, \
1253 (counter * 8 + SEND_COUNTER_ARRAY64), \
1254 0, flags, \
1255 port_access_u64_csr)
1256
 1257#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1258CNTR_ELEM(#name,\
1259 counter * 8 + SEND_COUNTER_ARRAY64, \
1260 0, \
1261 flags, \
1262 dev_access_u64_csr)
1263
1264/* CCE */
1265#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1266CNTR_ELEM(#name, \
1267 (counter * 8 + CCE_COUNTER_ARRAY32), \
1268 0, flags | CNTR_32BIT, \
1269 dev_access_u32_csr)
1270
1271#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1272CNTR_ELEM(#name, \
1273 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1274 0, flags | CNTR_32BIT, \
1275 dev_access_u32_csr)
1276
1277/* DC */
1278#define DC_PERF_CNTR(name, counter, flags) \
1279CNTR_ELEM(#name, \
1280 counter, \
1281 0, \
1282 flags, \
1283 dev_access_u64_csr)
1284
1285#define DC_PERF_CNTR_LCB(name, counter, flags) \
1286CNTR_ELEM(#name, \
1287 counter, \
1288 0, \
1289 flags, \
1290 dc_access_lcb_cntr)
1291
1292/* ibp counters */
1293#define SW_IBP_CNTR(name, cntr) \
1294CNTR_ELEM(#name, \
1295 0, \
1296 0, \
1297 CNTR_SYNTH, \
1298 access_ibp_##cntr)
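
/*
 * Example expansion (the counter name and index are hypothetical, used
 * only to show how the helpers above combine):
 *
 *	RXE32_DEV_CNTR_ELEM(RxExample, 3, CNTR_NORMAL)
 *	  -> { "RxExample", (3 * 8 + RCV_COUNTER_ARRAY32), 0,
 *	       CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 *
 * Each entry in a hardware counter array is 8 bytes wide, so the CSR
 * offset is "array base + 8 * counter index"; the accessor chosen by the
 * macro determines whether the context passed at read time is the device
 * (dd) or the port (ppd).
 */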
1299
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07001300/**
 1301 * hfi1_addr_from_offset - return addr for readq/writeq
1302 * @dd - the dd device
1303 * @offset - the offset of the CSR within bar0
1304 *
1305 * This routine selects the appropriate base address
1306 * based on the indicated offset.
1307 */
1308static inline void __iomem *hfi1_addr_from_offset(
1309 const struct hfi1_devdata *dd,
1310 u32 offset)
1311{
1312 if (offset >= dd->base2_start)
1313 return dd->kregbase2 + (offset - dd->base2_start);
1314 return dd->kregbase1 + offset;
1315}
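
/*
 * A minimal sketch of the selection above (the split point is whatever
 * the chip init code programmed into dd->base2_start, not a fixed value):
 *
 *	offset <  dd->base2_start:  dd->kregbase1 + offset
 *	offset >= dd->base2_start:  dd->kregbase2 + (offset - dd->base2_start)
 *
 * BAR0 is thus presented to the rest of the driver as one flat offset
 * space even though it is ioremapped in two pieces.
 */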
1316
1317/**
1318 * read_csr - read CSR at the indicated offset
1319 * @dd - the dd device
1320 * @offset - the offset of the CSR within bar0
1321 *
1322 * Return: the value read or all FF's if there
1323 * is no mapping
1324 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04001325u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1326{
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07001327 if (dd->flags & HFI1_PRESENT)
1328 return readq(hfi1_addr_from_offset(dd, offset));
Mike Marciniszyn77241052015-07-30 15:17:43 -04001329 return -1;
1330}
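
/*
 * Illustrative use (CCE_REVISION is one of the chip CSR offsets defined
 * elsewhere in this driver):
 *
 *	u64 rev = read_csr(dd, CCE_REVISION);
 *
 * When the device is not flagged HFI1_PRESENT (e.g. after a PCI error),
 * the all-ones return value mirrors what a read from a missing device
 * would produce on the bus.
 */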
1331
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07001332/**
1333 * write_csr - write CSR at the indicated offset
1334 * @dd - the dd device
1335 * @offset - the offset of the CSR within bar0
1336 * @value - value to write
1337 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04001338void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1339{
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07001340 if (dd->flags & HFI1_PRESENT) {
1341 void __iomem *base = hfi1_addr_from_offset(dd, offset);
1342
1343 /* avoid write to RcvArray */
1344 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1345 return;
1346 writeq(value, base);
1347 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04001348}
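
/*
 * The rejected range above, [RCV_ARRAY, dd->base2_start), covers the
 * RcvArray entries; those are expected to be written through their own
 * dedicated mapping rather than this generic CSR path, so a stray
 * write_csr() into that window is flagged as a driver bug (WARN_ON).
 */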
1349
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07001350/**
 1351 * get_csr_addr - return the iomem address for offset
1352 * @dd - the dd device
1353 * @offset - the offset of the CSR within bar0
1354 *
1355 * Return: The iomem address to use in subsequent
1356 * writeq/readq operations.
1357 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04001358void __iomem *get_csr_addr(
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07001359 const struct hfi1_devdata *dd,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001360 u32 offset)
1361{
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07001362 if (dd->flags & HFI1_PRESENT)
1363 return hfi1_addr_from_offset(dd, offset);
1364 return NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001365}
1366
1367static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1368 int mode, u64 value)
1369{
1370 u64 ret;
1371
Mike Marciniszyn77241052015-07-30 15:17:43 -04001372 if (mode == CNTR_MODE_R) {
1373 ret = read_csr(dd, csr);
1374 } else if (mode == CNTR_MODE_W) {
1375 write_csr(dd, csr, value);
1376 ret = value;
1377 } else {
 1378 dd_dev_err(dd, "Invalid cntr register access mode\n");
1379 return 0;
1380 }
1381
1382 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1383 return ret;
1384}
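
/*
 * All CSR-backed counter accessors funnel through this helper; the mode
 * values come from the counter framework. Illustrative sequence:
 *
 *	val = read_write_csr(dd, csr, CNTR_MODE_R, 0);   read current value
 *	(void)read_write_csr(dd, csr, CNTR_MODE_W, 0);   write (zero) it
 */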
1385
1386/* Dev Access */
1387static u64 dev_access_u32_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001388 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001389{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301390 struct hfi1_devdata *dd = context;
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001391 u64 csr = entry->csr;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001392
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001393 if (entry->flags & CNTR_SDMA) {
1394 if (vl == CNTR_INVALID_VL)
1395 return 0;
1396 csr += 0x100 * vl;
1397 } else {
1398 if (vl != CNTR_INVALID_VL)
1399 return 0;
1400 }
1401 return read_write_csr(dd, csr, mode, data);
1402}
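
/*
 * Note on the stride above: entries flagged CNTR_SDMA have one copy of
 * the CSR per SDMA engine, spaced 0x100 bytes apart, and the "vl"
 * argument is reused as the engine index for those entries; plain device
 * counters only answer for CNTR_INVALID_VL.
 */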
1403
1404static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1405 void *context, int idx, int mode, u64 data)
1406{
1407 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1408
1409 if (dd->per_sdma && idx < dd->num_sdma)
1410 return dd->per_sdma[idx].err_cnt;
1411 return 0;
1412}
1413
1414static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1415 void *context, int idx, int mode, u64 data)
1416{
1417 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1418
1419 if (dd->per_sdma && idx < dd->num_sdma)
1420 return dd->per_sdma[idx].sdma_int_cnt;
1421 return 0;
1422}
1423
1424static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1425 void *context, int idx, int mode, u64 data)
1426{
1427 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1428
1429 if (dd->per_sdma && idx < dd->num_sdma)
1430 return dd->per_sdma[idx].idle_int_cnt;
1431 return 0;
1432}
1433
1434static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1435 void *context, int idx, int mode,
1436 u64 data)
1437{
1438 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1439
1440 if (dd->per_sdma && idx < dd->num_sdma)
1441 return dd->per_sdma[idx].progress_int_cnt;
1442 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001443}
1444
1445static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001446 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001447{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301448 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001449
1450 u64 val = 0;
1451 u64 csr = entry->csr;
1452
1453 if (entry->flags & CNTR_VL) {
1454 if (vl == CNTR_INVALID_VL)
1455 return 0;
1456 csr += 8 * vl;
1457 } else {
1458 if (vl != CNTR_INVALID_VL)
1459 return 0;
1460 }
1461
1462 val = read_write_csr(dd, csr, mode, data);
1463 return val;
1464}
1465
1466static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001467 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001468{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301469 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001470 u32 csr = entry->csr;
1471 int ret = 0;
1472
1473 if (vl != CNTR_INVALID_VL)
1474 return 0;
1475 if (mode == CNTR_MODE_R)
1476 ret = read_lcb_csr(dd, csr, &data);
1477 else if (mode == CNTR_MODE_W)
1478 ret = write_lcb_csr(dd, csr, data);
1479
1480 if (ret) {
 1481 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x\n", csr);
1482 return 0;
1483 }
1484
1485 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1486 return data;
1487}
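
/*
 * Unlike the plain CSR accessors, the LCB block is normally under control
 * of the DC 8051 firmware; read_lcb_csr()/write_lcb_csr() must first
 * arbitrate for host access, which is why this accessor can fail and
 * report 0 where read_csr() cannot.
 */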
1488
1489/* Port Access */
1490static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001491 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001492{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301493 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001494
1495 if (vl != CNTR_INVALID_VL)
1496 return 0;
1497 return read_write_csr(ppd->dd, entry->csr, mode, data);
1498}
1499
1500static u64 port_access_u64_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001501 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001502{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301503 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001504 u64 val;
1505 u64 csr = entry->csr;
1506
1507 if (entry->flags & CNTR_VL) {
1508 if (vl == CNTR_INVALID_VL)
1509 return 0;
1510 csr += 8 * vl;
1511 } else {
1512 if (vl != CNTR_INVALID_VL)
1513 return 0;
1514 }
1515 val = read_write_csr(ppd->dd, csr, mode, data);
1516 return val;
1517}
1518
1519/* Software defined */
1520static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1521 u64 data)
1522{
1523 u64 ret;
1524
1525 if (mode == CNTR_MODE_R) {
1526 ret = *cntr;
1527 } else if (mode == CNTR_MODE_W) {
1528 *cntr = data;
1529 ret = data;
1530 } else {
 1531 dd_dev_err(dd, "Invalid cntr sw access mode\n");
1532 return 0;
1533 }
1534
1535 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1536
1537 return ret;
1538}
1539
1540static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001541 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001542{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301543 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001544
1545 if (vl != CNTR_INVALID_VL)
1546 return 0;
1547 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1548}
1549
1550static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001551 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001552{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301553 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001554
1555 if (vl != CNTR_INVALID_VL)
1556 return 0;
1557 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1558}
1559
Dean Luick6d014532015-12-01 15:38:23 -05001560static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1561 void *context, int vl, int mode,
1562 u64 data)
1563{
1564 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1565
1566 if (vl != CNTR_INVALID_VL)
1567 return 0;
1568 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1569}
1570
Mike Marciniszyn77241052015-07-30 15:17:43 -04001571static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001572 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001573{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001574 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1575 u64 zero = 0;
1576 u64 *counter;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001577
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001578 if (vl == CNTR_INVALID_VL)
1579 counter = &ppd->port_xmit_discards;
1580 else if (vl >= 0 && vl < C_VL_COUNT)
1581 counter = &ppd->port_xmit_discards_vl[vl];
1582 else
1583 counter = &zero;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001584
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001585 return read_write_sw(ppd->dd, counter, mode, data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001586}
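
/*
 * Transmit discards are tracked twice: an aggregate count (reported for
 * CNTR_INVALID_VL) and a per-VL count. Out-of-range VLs fall through to
 * a throwaway local so the read/write still lands somewhere harmless.
 */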
1587
1588static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001589 void *context, int vl, int mode,
1590 u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001591{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301592 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001593
1594 if (vl != CNTR_INVALID_VL)
1595 return 0;
1596
1597 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1598 mode, data);
1599}
1600
1601static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001602 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001603{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301604 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001605
1606 if (vl != CNTR_INVALID_VL)
1607 return 0;
1608
1609 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1610 mode, data);
1611}
1612
1613u64 get_all_cpu_total(u64 __percpu *cntr)
1614{
1615 int cpu;
1616 u64 counter = 0;
1617
1618 for_each_possible_cpu(cpu)
1619 counter += *per_cpu_ptr(cntr, cpu);
1620 return counter;
1621}
1622
1623static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1624 u64 __percpu *cntr,
1625 int vl, int mode, u64 data)
1626{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001627 u64 ret = 0;
1628
1629 if (vl != CNTR_INVALID_VL)
1630 return 0;
1631
1632 if (mode == CNTR_MODE_R) {
1633 ret = get_all_cpu_total(cntr) - *z_val;
1634 } else if (mode == CNTR_MODE_W) {
1635 /* A write can only zero the counter */
1636 if (data == 0)
1637 *z_val = get_all_cpu_total(cntr);
1638 else
 1639 dd_dev_err(dd, "Per CPU cntrs can only be zeroed\n");
1640 } else {
 1641 dd_dev_err(dd, "Invalid cntr sw cpu access mode\n");
1642 return 0;
1643 }
1644
1645 return ret;
1646}
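
/*
 * Per-CPU counters are never cleared in place; a "zeroing" write just
 * snapshots the current total into *z_val so later reads report the
 * delta. Illustrative sequence:
 *
 *	read_write_cpu(dd, &z, cntr, vl, CNTR_MODE_W, 0);   z = total
 *	read_write_cpu(dd, &z, cntr, vl, CNTR_MODE_R, 0);   returns total - z
 */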
1647
1648static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1649 void *context, int vl, int mode, u64 data)
1650{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301651 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001652
1653 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1654 mode, data);
1655}
1656
1657static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001658 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001659{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301660 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001661
1662 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1663 mode, data);
1664}
1665
1666static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1667 void *context, int vl, int mode, u64 data)
1668{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301669 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001670
1671 return dd->verbs_dev.n_piowait;
1672}
1673
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001674static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1675 void *context, int vl, int mode, u64 data)
1676{
1677 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1678
1679 return dd->verbs_dev.n_piodrain;
1680}
1681
Mike Marciniszyn77241052015-07-30 15:17:43 -04001682static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1683 void *context, int vl, int mode, u64 data)
1684{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301685 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001686
1687 return dd->verbs_dev.n_txwait;
1688}
1689
1690static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1691 void *context, int vl, int mode, u64 data)
1692{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301693 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001694
1695 return dd->verbs_dev.n_kmem_wait;
1696}
1697
Dean Luickb4219222015-10-26 10:28:35 -04001698static u64 access_sw_send_schedule(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001699 void *context, int vl, int mode, u64 data)
Dean Luickb4219222015-10-26 10:28:35 -04001700{
1701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1702
Vennila Megavannan89abfc82016-02-03 14:34:07 -08001703 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1704 mode, data);
Dean Luickb4219222015-10-26 10:28:35 -04001705}
1706
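/*
 * In each of the access_*_err_cnt() groups below, the array index is the
 * bit position of the corresponding error within the hardware error
 * status register (starting with MISC_ERR_STATUS); the error interrupt
 * path is assumed to bump the matching element whenever that status bit
 * is observed. The same pattern repeats for the CceErrStatus,
 * RcvErrStatus, SendPioErrStatus, SendDmaErrStatus and SendEgressErrStatus
 * groups.
 */
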
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05001707/* Software counters for the error status bits within MISC_ERR_STATUS */
1708static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1709 void *context, int vl, int mode,
1710 u64 data)
1711{
1712 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1713
1714 return dd->misc_err_status_cnt[12];
1715}
1716
1717static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1718 void *context, int vl, int mode,
1719 u64 data)
1720{
1721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1722
1723 return dd->misc_err_status_cnt[11];
1724}
1725
1726static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1727 void *context, int vl, int mode,
1728 u64 data)
1729{
1730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1731
1732 return dd->misc_err_status_cnt[10];
1733}
1734
1735static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1736 void *context, int vl,
1737 int mode, u64 data)
1738{
1739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1740
1741 return dd->misc_err_status_cnt[9];
1742}
1743
1744static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1745 void *context, int vl, int mode,
1746 u64 data)
1747{
1748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1749
1750 return dd->misc_err_status_cnt[8];
1751}
1752
1753static u64 access_misc_efuse_read_bad_addr_err_cnt(
1754 const struct cntr_entry *entry,
1755 void *context, int vl, int mode, u64 data)
1756{
1757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1758
1759 return dd->misc_err_status_cnt[7];
1760}
1761
1762static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1763 void *context, int vl,
1764 int mode, u64 data)
1765{
1766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1767
1768 return dd->misc_err_status_cnt[6];
1769}
1770
1771static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1772 void *context, int vl, int mode,
1773 u64 data)
1774{
1775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1776
1777 return dd->misc_err_status_cnt[5];
1778}
1779
1780static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1781 void *context, int vl, int mode,
1782 u64 data)
1783{
1784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1785
1786 return dd->misc_err_status_cnt[4];
1787}
1788
1789static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1790 void *context, int vl,
1791 int mode, u64 data)
1792{
1793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1794
1795 return dd->misc_err_status_cnt[3];
1796}
1797
1798static u64 access_misc_csr_write_bad_addr_err_cnt(
1799 const struct cntr_entry *entry,
1800 void *context, int vl, int mode, u64 data)
1801{
1802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1803
1804 return dd->misc_err_status_cnt[2];
1805}
1806
1807static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1808 void *context, int vl,
1809 int mode, u64 data)
1810{
1811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1812
1813 return dd->misc_err_status_cnt[1];
1814}
1815
1816static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1817 void *context, int vl, int mode,
1818 u64 data)
1819{
1820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1821
1822 return dd->misc_err_status_cnt[0];
1823}
1824
1825/*
1826 * Software counter for the aggregate of
1827 * individual CceErrStatus counters
1828 */
1829static u64 access_sw_cce_err_status_aggregated_cnt(
1830 const struct cntr_entry *entry,
1831 void *context, int vl, int mode, u64 data)
1832{
1833 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1834
1835 return dd->sw_cce_err_status_aggregate;
1836}
1837
1838/*
1839 * Software counters corresponding to each of the
1840 * error status bits within CceErrStatus
1841 */
1842static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1843 void *context, int vl, int mode,
1844 u64 data)
1845{
1846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1847
1848 return dd->cce_err_status_cnt[40];
1849}
1850
1851static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1852 void *context, int vl, int mode,
1853 u64 data)
1854{
1855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1856
1857 return dd->cce_err_status_cnt[39];
1858}
1859
1860static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1861 void *context, int vl, int mode,
1862 u64 data)
1863{
1864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1865
1866 return dd->cce_err_status_cnt[38];
1867}
1868
1869static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1870 void *context, int vl, int mode,
1871 u64 data)
1872{
1873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1874
1875 return dd->cce_err_status_cnt[37];
1876}
1877
1878static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1879 void *context, int vl, int mode,
1880 u64 data)
1881{
1882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1883
1884 return dd->cce_err_status_cnt[36];
1885}
1886
1887static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1888 const struct cntr_entry *entry,
1889 void *context, int vl, int mode, u64 data)
1890{
1891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1892
1893 return dd->cce_err_status_cnt[35];
1894}
1895
1896static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1897 const struct cntr_entry *entry,
1898 void *context, int vl, int mode, u64 data)
1899{
1900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1901
1902 return dd->cce_err_status_cnt[34];
1903}
1904
1905static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1906 void *context, int vl,
1907 int mode, u64 data)
1908{
1909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1910
1911 return dd->cce_err_status_cnt[33];
1912}
1913
1914static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1915 void *context, int vl, int mode,
1916 u64 data)
1917{
1918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1919
1920 return dd->cce_err_status_cnt[32];
1921}
1922
1923static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1924 void *context, int vl, int mode, u64 data)
1925{
1926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1927
1928 return dd->cce_err_status_cnt[31];
1929}
1930
1931static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1932 void *context, int vl, int mode,
1933 u64 data)
1934{
1935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1936
1937 return dd->cce_err_status_cnt[30];
1938}
1939
1940static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1941 void *context, int vl, int mode,
1942 u64 data)
1943{
1944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1945
1946 return dd->cce_err_status_cnt[29];
1947}
1948
1949static u64 access_pcic_transmit_back_parity_err_cnt(
1950 const struct cntr_entry *entry,
1951 void *context, int vl, int mode, u64 data)
1952{
1953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1954
1955 return dd->cce_err_status_cnt[28];
1956}
1957
1958static u64 access_pcic_transmit_front_parity_err_cnt(
1959 const struct cntr_entry *entry,
1960 void *context, int vl, int mode, u64 data)
1961{
1962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1963
1964 return dd->cce_err_status_cnt[27];
1965}
1966
1967static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1968 void *context, int vl, int mode,
1969 u64 data)
1970{
1971 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1972
1973 return dd->cce_err_status_cnt[26];
1974}
1975
1976static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1977 void *context, int vl, int mode,
1978 u64 data)
1979{
1980 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1981
1982 return dd->cce_err_status_cnt[25];
1983}
1984
1985static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1986 void *context, int vl, int mode,
1987 u64 data)
1988{
1989 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1990
1991 return dd->cce_err_status_cnt[24];
1992}
1993
1994static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1995 void *context, int vl, int mode,
1996 u64 data)
1997{
1998 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1999
2000 return dd->cce_err_status_cnt[23];
2001}
2002
2003static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2004 void *context, int vl,
2005 int mode, u64 data)
2006{
2007 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2008
2009 return dd->cce_err_status_cnt[22];
2010}
2011
2012static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2013 void *context, int vl, int mode,
2014 u64 data)
2015{
2016 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2017
2018 return dd->cce_err_status_cnt[21];
2019}
2020
2021static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2022 const struct cntr_entry *entry,
2023 void *context, int vl, int mode, u64 data)
2024{
2025 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2026
2027 return dd->cce_err_status_cnt[20];
2028}
2029
2030static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2031 void *context, int vl,
2032 int mode, u64 data)
2033{
2034 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2035
2036 return dd->cce_err_status_cnt[19];
2037}
2038
2039static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2040 void *context, int vl, int mode,
2041 u64 data)
2042{
2043 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2044
2045 return dd->cce_err_status_cnt[18];
2046}
2047
2048static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2049 void *context, int vl, int mode,
2050 u64 data)
2051{
2052 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2053
2054 return dd->cce_err_status_cnt[17];
2055}
2056
2057static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2058 void *context, int vl, int mode,
2059 u64 data)
2060{
2061 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2062
2063 return dd->cce_err_status_cnt[16];
2064}
2065
2066static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2067 void *context, int vl, int mode,
2068 u64 data)
2069{
2070 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2071
2072 return dd->cce_err_status_cnt[15];
2073}
2074
2075static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2076 void *context, int vl,
2077 int mode, u64 data)
2078{
2079 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2080
2081 return dd->cce_err_status_cnt[14];
2082}
2083
2084static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2085 void *context, int vl, int mode,
2086 u64 data)
2087{
2088 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2089
2090 return dd->cce_err_status_cnt[13];
2091}
2092
2093static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2094 const struct cntr_entry *entry,
2095 void *context, int vl, int mode, u64 data)
2096{
2097 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2098
2099 return dd->cce_err_status_cnt[12];
2100}
2101
2102static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2103 const struct cntr_entry *entry,
2104 void *context, int vl, int mode, u64 data)
2105{
2106 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2107
2108 return dd->cce_err_status_cnt[11];
2109}
2110
2111static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2112 const struct cntr_entry *entry,
2113 void *context, int vl, int mode, u64 data)
2114{
2115 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2116
2117 return dd->cce_err_status_cnt[10];
2118}
2119
2120static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2121 const struct cntr_entry *entry,
2122 void *context, int vl, int mode, u64 data)
2123{
2124 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2125
2126 return dd->cce_err_status_cnt[9];
2127}
2128
2129static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2130 const struct cntr_entry *entry,
2131 void *context, int vl, int mode, u64 data)
2132{
2133 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2134
2135 return dd->cce_err_status_cnt[8];
2136}
2137
2138static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2139 void *context, int vl,
2140 int mode, u64 data)
2141{
2142 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2143
2144 return dd->cce_err_status_cnt[7];
2145}
2146
2147static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2148 const struct cntr_entry *entry,
2149 void *context, int vl, int mode, u64 data)
2150{
2151 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2152
2153 return dd->cce_err_status_cnt[6];
2154}
2155
2156static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2157 void *context, int vl, int mode,
2158 u64 data)
2159{
2160 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2161
2162 return dd->cce_err_status_cnt[5];
2163}
2164
2165static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2166 void *context, int vl, int mode,
2167 u64 data)
2168{
2169 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2170
2171 return dd->cce_err_status_cnt[4];
2172}
2173
2174static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2175 const struct cntr_entry *entry,
2176 void *context, int vl, int mode, u64 data)
2177{
2178 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2179
2180 return dd->cce_err_status_cnt[3];
2181}
2182
2183static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2184 void *context, int vl,
2185 int mode, u64 data)
2186{
2187 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2188
2189 return dd->cce_err_status_cnt[2];
2190}
2191
2192static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2193 void *context, int vl,
2194 int mode, u64 data)
2195{
2196 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2197
2198 return dd->cce_err_status_cnt[1];
2199}
2200
2201static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2202 void *context, int vl, int mode,
2203 u64 data)
2204{
2205 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2206
2207 return dd->cce_err_status_cnt[0];
2208}
2209
2210/*
2211 * Software counters corresponding to each of the
2212 * error status bits within RcvErrStatus
2213 */
2214static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2215 void *context, int vl, int mode,
2216 u64 data)
2217{
2218 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2219
2220 return dd->rcv_err_status_cnt[63];
2221}
2222
2223static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2224 void *context, int vl,
2225 int mode, u64 data)
2226{
2227 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2228
2229 return dd->rcv_err_status_cnt[62];
2230}
2231
2232static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2233 void *context, int vl, int mode,
2234 u64 data)
2235{
2236 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2237
2238 return dd->rcv_err_status_cnt[61];
2239}
2240
2241static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2242 void *context, int vl, int mode,
2243 u64 data)
2244{
2245 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2246
2247 return dd->rcv_err_status_cnt[60];
2248}
2249
2250static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2251 void *context, int vl,
2252 int mode, u64 data)
2253{
2254 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2255
2256 return dd->rcv_err_status_cnt[59];
2257}
2258
2259static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2260 void *context, int vl,
2261 int mode, u64 data)
2262{
2263 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2264
2265 return dd->rcv_err_status_cnt[58];
2266}
2267
2268static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2269 void *context, int vl, int mode,
2270 u64 data)
2271{
2272 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2273
2274 return dd->rcv_err_status_cnt[57];
2275}
2276
2277static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2278 void *context, int vl, int mode,
2279 u64 data)
2280{
2281 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2282
2283 return dd->rcv_err_status_cnt[56];
2284}
2285
2286static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2287 void *context, int vl, int mode,
2288 u64 data)
2289{
2290 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2291
2292 return dd->rcv_err_status_cnt[55];
2293}
2294
2295static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2296 const struct cntr_entry *entry,
2297 void *context, int vl, int mode, u64 data)
2298{
2299 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2300
2301 return dd->rcv_err_status_cnt[54];
2302}
2303
2304static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2305 const struct cntr_entry *entry,
2306 void *context, int vl, int mode, u64 data)
2307{
2308 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2309
2310 return dd->rcv_err_status_cnt[53];
2311}
2312
2313static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2314 void *context, int vl,
2315 int mode, u64 data)
2316{
2317 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2318
2319 return dd->rcv_err_status_cnt[52];
2320}
2321
2322static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2323 void *context, int vl,
2324 int mode, u64 data)
2325{
2326 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2327
2328 return dd->rcv_err_status_cnt[51];
2329}
2330
2331static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2332 void *context, int vl,
2333 int mode, u64 data)
2334{
2335 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2336
2337 return dd->rcv_err_status_cnt[50];
2338}
2339
2340static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2341 void *context, int vl,
2342 int mode, u64 data)
2343{
2344 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2345
2346 return dd->rcv_err_status_cnt[49];
2347}
2348
2349static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2350 void *context, int vl,
2351 int mode, u64 data)
2352{
2353 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2354
2355 return dd->rcv_err_status_cnt[48];
2356}
2357
2358static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2359 void *context, int vl,
2360 int mode, u64 data)
2361{
2362 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2363
2364 return dd->rcv_err_status_cnt[47];
2365}
2366
2367static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2368 void *context, int vl, int mode,
2369 u64 data)
2370{
2371 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2372
2373 return dd->rcv_err_status_cnt[46];
2374}
2375
2376static u64 access_rx_hq_intr_csr_parity_err_cnt(
2377 const struct cntr_entry *entry,
2378 void *context, int vl, int mode, u64 data)
2379{
2380 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2381
2382 return dd->rcv_err_status_cnt[45];
2383}
2384
2385static u64 access_rx_lookup_csr_parity_err_cnt(
2386 const struct cntr_entry *entry,
2387 void *context, int vl, int mode, u64 data)
2388{
2389 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2390
2391 return dd->rcv_err_status_cnt[44];
2392}
2393
2394static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2395 const struct cntr_entry *entry,
2396 void *context, int vl, int mode, u64 data)
2397{
2398 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2399
2400 return dd->rcv_err_status_cnt[43];
2401}
2402
2403static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2404 const struct cntr_entry *entry,
2405 void *context, int vl, int mode, u64 data)
2406{
2407 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2408
2409 return dd->rcv_err_status_cnt[42];
2410}
2411
2412static u64 access_rx_lookup_des_part2_parity_err_cnt(
2413 const struct cntr_entry *entry,
2414 void *context, int vl, int mode, u64 data)
2415{
2416 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2417
2418 return dd->rcv_err_status_cnt[41];
2419}
2420
2421static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2422 const struct cntr_entry *entry,
2423 void *context, int vl, int mode, u64 data)
2424{
2425 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2426
2427 return dd->rcv_err_status_cnt[40];
2428}
2429
2430static u64 access_rx_lookup_des_part1_unc_err_cnt(
2431 const struct cntr_entry *entry,
2432 void *context, int vl, int mode, u64 data)
2433{
2434 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2435
2436 return dd->rcv_err_status_cnt[39];
2437}
2438
2439static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2440 const struct cntr_entry *entry,
2441 void *context, int vl, int mode, u64 data)
2442{
2443 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2444
2445 return dd->rcv_err_status_cnt[38];
2446}
2447
2448static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2449 const struct cntr_entry *entry,
2450 void *context, int vl, int mode, u64 data)
2451{
2452 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2453
2454 return dd->rcv_err_status_cnt[37];
2455}
2456
2457static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2458 const struct cntr_entry *entry,
2459 void *context, int vl, int mode, u64 data)
2460{
2461 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2462
2463 return dd->rcv_err_status_cnt[36];
2464}
2465
2466static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2467 const struct cntr_entry *entry,
2468 void *context, int vl, int mode, u64 data)
2469{
2470 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2471
2472 return dd->rcv_err_status_cnt[35];
2473}
2474
2475static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2476 const struct cntr_entry *entry,
2477 void *context, int vl, int mode, u64 data)
2478{
2479 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2480
2481 return dd->rcv_err_status_cnt[34];
2482}
2483
2484static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2485 const struct cntr_entry *entry,
2486 void *context, int vl, int mode, u64 data)
2487{
2488 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2489
2490 return dd->rcv_err_status_cnt[33];
2491}
2492
2493static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2494 void *context, int vl, int mode,
2495 u64 data)
2496{
2497 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2498
2499 return dd->rcv_err_status_cnt[32];
2500}
2501
2502static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2503 void *context, int vl, int mode,
2504 u64 data)
2505{
2506 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2507
2508 return dd->rcv_err_status_cnt[31];
2509}
2510
2511static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2512 void *context, int vl, int mode,
2513 u64 data)
2514{
2515 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2516
2517 return dd->rcv_err_status_cnt[30];
2518}
2519
2520static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2521 void *context, int vl, int mode,
2522 u64 data)
2523{
2524 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2525
2526 return dd->rcv_err_status_cnt[29];
2527}
2528
2529static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2530 void *context, int vl,
2531 int mode, u64 data)
2532{
2533 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2534
2535 return dd->rcv_err_status_cnt[28];
2536}
2537
2538static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2539 const struct cntr_entry *entry,
2540 void *context, int vl, int mode, u64 data)
2541{
2542 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2543
2544 return dd->rcv_err_status_cnt[27];
2545}
2546
2547static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2548 const struct cntr_entry *entry,
2549 void *context, int vl, int mode, u64 data)
2550{
2551 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2552
2553 return dd->rcv_err_status_cnt[26];
2554}
2555
2556static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2557 const struct cntr_entry *entry,
2558 void *context, int vl, int mode, u64 data)
2559{
2560 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2561
2562 return dd->rcv_err_status_cnt[25];
2563}
2564
2565static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2566 const struct cntr_entry *entry,
2567 void *context, int vl, int mode, u64 data)
2568{
2569 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2570
2571 return dd->rcv_err_status_cnt[24];
2572}
2573
2574static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2575 const struct cntr_entry *entry,
2576 void *context, int vl, int mode, u64 data)
2577{
2578 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2579
2580 return dd->rcv_err_status_cnt[23];
2581}
2582
2583static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2584 const struct cntr_entry *entry,
2585 void *context, int vl, int mode, u64 data)
2586{
2587 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2588
2589 return dd->rcv_err_status_cnt[22];
2590}
2591
2592static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2593 const struct cntr_entry *entry,
2594 void *context, int vl, int mode, u64 data)
2595{
2596 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2597
2598 return dd->rcv_err_status_cnt[21];
2599}
2600
2601static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2602 const struct cntr_entry *entry,
2603 void *context, int vl, int mode, u64 data)
2604{
2605 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2606
2607 return dd->rcv_err_status_cnt[20];
2608}
2609
2610static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2611 const struct cntr_entry *entry,
2612 void *context, int vl, int mode, u64 data)
2613{
2614 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2615
2616 return dd->rcv_err_status_cnt[19];
2617}
2618
2619static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2620 void *context, int vl,
2621 int mode, u64 data)
2622{
2623 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2624
2625 return dd->rcv_err_status_cnt[18];
2626}
2627
2628static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2629 void *context, int vl,
2630 int mode, u64 data)
2631{
2632 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2633
2634 return dd->rcv_err_status_cnt[17];
2635}
2636
2637static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2638 const struct cntr_entry *entry,
2639 void *context, int vl, int mode, u64 data)
2640{
2641 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2642
2643 return dd->rcv_err_status_cnt[16];
2644}
2645
2646static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2647 const struct cntr_entry *entry,
2648 void *context, int vl, int mode, u64 data)
2649{
2650 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2651
2652 return dd->rcv_err_status_cnt[15];
2653}
2654
2655static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2656 void *context, int vl,
2657 int mode, u64 data)
2658{
2659 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2660
2661 return dd->rcv_err_status_cnt[14];
2662}
2663
2664static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2665 void *context, int vl,
2666 int mode, u64 data)
2667{
2668 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2669
2670 return dd->rcv_err_status_cnt[13];
2671}
2672
2673static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2674 void *context, int vl, int mode,
2675 u64 data)
2676{
2677 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2678
2679 return dd->rcv_err_status_cnt[12];
2680}
2681
2682static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2683 void *context, int vl, int mode,
2684 u64 data)
2685{
2686 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2687
2688 return dd->rcv_err_status_cnt[11];
2689}
2690
2691static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2692 void *context, int vl, int mode,
2693 u64 data)
2694{
2695 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2696
2697 return dd->rcv_err_status_cnt[10];
2698}
2699
2700static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2701 void *context, int vl, int mode,
2702 u64 data)
2703{
2704 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2705
2706 return dd->rcv_err_status_cnt[9];
2707}
2708
2709static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2710 void *context, int vl, int mode,
2711 u64 data)
2712{
2713 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2714
2715 return dd->rcv_err_status_cnt[8];
2716}
2717
2718static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2719 const struct cntr_entry *entry,
2720 void *context, int vl, int mode, u64 data)
2721{
2722 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2723
2724 return dd->rcv_err_status_cnt[7];
2725}
2726
2727static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2728 const struct cntr_entry *entry,
2729 void *context, int vl, int mode, u64 data)
2730{
2731 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2732
2733 return dd->rcv_err_status_cnt[6];
2734}
2735
2736static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2737 void *context, int vl, int mode,
2738 u64 data)
2739{
2740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2741
2742 return dd->rcv_err_status_cnt[5];
2743}
2744
2745static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2746 void *context, int vl, int mode,
2747 u64 data)
2748{
2749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2750
2751 return dd->rcv_err_status_cnt[4];
2752}
2753
2754static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2755 void *context, int vl, int mode,
2756 u64 data)
2757{
2758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2759
2760 return dd->rcv_err_status_cnt[3];
2761}
2762
2763static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2764 void *context, int vl, int mode,
2765 u64 data)
2766{
2767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2768
2769 return dd->rcv_err_status_cnt[2];
2770}
2771
2772static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2773 void *context, int vl, int mode,
2774 u64 data)
2775{
2776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2777
2778 return dd->rcv_err_status_cnt[1];
2779}
2780
2781static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2782 void *context, int vl, int mode,
2783 u64 data)
2784{
2785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2786
2787 return dd->rcv_err_status_cnt[0];
2788}
2789
2790/*
2791 * Software counters corresponding to each of the
2792 * error status bits within SendPioErrStatus
2793 */
2794static u64 access_pio_pec_sop_head_parity_err_cnt(
2795 const struct cntr_entry *entry,
2796 void *context, int vl, int mode, u64 data)
2797{
2798 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2799
2800 return dd->send_pio_err_status_cnt[35];
2801}
2802
2803static u64 access_pio_pcc_sop_head_parity_err_cnt(
2804 const struct cntr_entry *entry,
2805 void *context, int vl, int mode, u64 data)
2806{
2807 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2808
2809 return dd->send_pio_err_status_cnt[34];
2810}
2811
2812static u64 access_pio_last_returned_cnt_parity_err_cnt(
2813 const struct cntr_entry *entry,
2814 void *context, int vl, int mode, u64 data)
2815{
2816 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2817
2818 return dd->send_pio_err_status_cnt[33];
2819}
2820
2821static u64 access_pio_current_free_cnt_parity_err_cnt(
2822 const struct cntr_entry *entry,
2823 void *context, int vl, int mode, u64 data)
2824{
2825 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2826
2827 return dd->send_pio_err_status_cnt[32];
2828}
2829
2830static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2831 void *context, int vl, int mode,
2832 u64 data)
2833{
2834 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2835
2836 return dd->send_pio_err_status_cnt[31];
2837}
2838
2839static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2840 void *context, int vl, int mode,
2841 u64 data)
2842{
2843 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2844
2845 return dd->send_pio_err_status_cnt[30];
2846}
2847
2848static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2849 void *context, int vl, int mode,
2850 u64 data)
2851{
2852 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2853
2854 return dd->send_pio_err_status_cnt[29];
2855}
2856
2857static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2858 const struct cntr_entry *entry,
2859 void *context, int vl, int mode, u64 data)
2860{
2861 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2862
2863 return dd->send_pio_err_status_cnt[28];
2864}
2865
2866static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2867 void *context, int vl, int mode,
2868 u64 data)
2869{
2870 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2871
2872 return dd->send_pio_err_status_cnt[27];
2873}
2874
2875static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2876 void *context, int vl, int mode,
2877 u64 data)
2878{
2879 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2880
2881 return dd->send_pio_err_status_cnt[26];
2882}
2883
2884static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2885 void *context, int vl,
2886 int mode, u64 data)
2887{
2888 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2889
2890 return dd->send_pio_err_status_cnt[25];
2891}
2892
2893static u64 access_pio_block_qw_count_parity_err_cnt(
2894 const struct cntr_entry *entry,
2895 void *context, int vl, int mode, u64 data)
2896{
2897 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2898
2899 return dd->send_pio_err_status_cnt[24];
2900}
2901
2902static u64 access_pio_write_qw_valid_parity_err_cnt(
2903 const struct cntr_entry *entry,
2904 void *context, int vl, int mode, u64 data)
2905{
2906 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2907
2908 return dd->send_pio_err_status_cnt[23];
2909}
2910
2911static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2912 void *context, int vl, int mode,
2913 u64 data)
2914{
2915 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2916
2917 return dd->send_pio_err_status_cnt[22];
2918}
2919
2920static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2921 void *context, int vl,
2922 int mode, u64 data)
2923{
2924 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2925
2926 return dd->send_pio_err_status_cnt[21];
2927}
2928
2929static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2930 void *context, int vl,
2931 int mode, u64 data)
2932{
2933 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2934
2935 return dd->send_pio_err_status_cnt[20];
2936}
2937
2938static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2939 void *context, int vl,
2940 int mode, u64 data)
2941{
2942 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2943
2944 return dd->send_pio_err_status_cnt[19];
2945}
2946
2947static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2948 const struct cntr_entry *entry,
2949 void *context, int vl, int mode, u64 data)
2950{
2951 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2952
2953 return dd->send_pio_err_status_cnt[18];
2954}
2955
2956static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2957 void *context, int vl, int mode,
2958 u64 data)
2959{
2960 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2961
2962 return dd->send_pio_err_status_cnt[17];
2963}
2964
2965static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2966 void *context, int vl, int mode,
2967 u64 data)
2968{
2969 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2970
2971 return dd->send_pio_err_status_cnt[16];
2972}
2973
2974static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2975 const struct cntr_entry *entry,
2976 void *context, int vl, int mode, u64 data)
2977{
2978 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2979
2980 return dd->send_pio_err_status_cnt[15];
2981}
2982
2983static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2984 const struct cntr_entry *entry,
2985 void *context, int vl, int mode, u64 data)
2986{
2987 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2988
2989 return dd->send_pio_err_status_cnt[14];
2990}
2991
2992static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2993 const struct cntr_entry *entry,
2994 void *context, int vl, int mode, u64 data)
2995{
2996 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2997
2998 return dd->send_pio_err_status_cnt[13];
2999}
3000
3001static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3002 const struct cntr_entry *entry,
3003 void *context, int vl, int mode, u64 data)
3004{
3005 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3006
3007 return dd->send_pio_err_status_cnt[12];
3008}
3009
3010static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3011 const struct cntr_entry *entry,
3012 void *context, int vl, int mode, u64 data)
3013{
3014 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3015
3016 return dd->send_pio_err_status_cnt[11];
3017}
3018
3019static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3020 const struct cntr_entry *entry,
3021 void *context, int vl, int mode, u64 data)
3022{
3023 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3024
3025 return dd->send_pio_err_status_cnt[10];
3026}
3027
3028static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3029 const struct cntr_entry *entry,
3030 void *context, int vl, int mode, u64 data)
3031{
3032 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3033
3034 return dd->send_pio_err_status_cnt[9];
3035}
3036
3037static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3038 const struct cntr_entry *entry,
3039 void *context, int vl, int mode, u64 data)
3040{
3041 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3042
3043 return dd->send_pio_err_status_cnt[8];
3044}
3045
3046static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3047 const struct cntr_entry *entry,
3048 void *context, int vl, int mode, u64 data)
3049{
3050 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3051
3052 return dd->send_pio_err_status_cnt[7];
3053}
3054
3055static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3056 void *context, int vl, int mode,
3057 u64 data)
3058{
3059 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3060
3061 return dd->send_pio_err_status_cnt[6];
3062}
3063
3064static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3065 void *context, int vl, int mode,
3066 u64 data)
3067{
3068 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3069
3070 return dd->send_pio_err_status_cnt[5];
3071}
3072
3073static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3074 void *context, int vl, int mode,
3075 u64 data)
3076{
3077 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3078
3079 return dd->send_pio_err_status_cnt[4];
3080}
3081
3082static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3083 void *context, int vl, int mode,
3084 u64 data)
3085{
3086 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3087
3088 return dd->send_pio_err_status_cnt[3];
3089}
3090
3091static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3092 void *context, int vl, int mode,
3093 u64 data)
3094{
3095 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3096
3097 return dd->send_pio_err_status_cnt[2];
3098}
3099
3100static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3101 void *context, int vl,
3102 int mode, u64 data)
3103{
3104 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3105
3106 return dd->send_pio_err_status_cnt[1];
3107}
3108
3109static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3110 void *context, int vl, int mode,
3111 u64 data)
3112{
3113 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3114
3115 return dd->send_pio_err_status_cnt[0];
3116}
3117
3118/*
3119 * Software counters corresponding to each of the
3120 * error status bits within SendDmaErrStatus
3121 */
3122static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3123 const struct cntr_entry *entry,
3124 void *context, int vl, int mode, u64 data)
3125{
3126 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3127
3128 return dd->send_dma_err_status_cnt[3];
3129}
3130
3131static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3132 const struct cntr_entry *entry,
3133 void *context, int vl, int mode, u64 data)
3134{
3135 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3136
3137 return dd->send_dma_err_status_cnt[2];
3138}
3139
3140static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3141 void *context, int vl, int mode,
3142 u64 data)
3143{
3144 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3145
3146 return dd->send_dma_err_status_cnt[1];
3147}
3148
3149static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3150 void *context, int vl, int mode,
3151 u64 data)
3152{
3153 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3154
3155 return dd->send_dma_err_status_cnt[0];
3156}
3157
3158/*
3159 * Software counters corresponding to each of the
3160 * error status bits within SendEgressErrStatus
3161 */
3162static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3163 const struct cntr_entry *entry,
3164 void *context, int vl, int mode, u64 data)
3165{
3166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3167
3168 return dd->send_egress_err_status_cnt[63];
3169}
3170
3171static u64 access_tx_read_sdma_memory_csr_err_cnt(
3172 const struct cntr_entry *entry,
3173 void *context, int vl, int mode, u64 data)
3174{
3175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3176
3177 return dd->send_egress_err_status_cnt[62];
3178}
3179
3180static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3181 void *context, int vl, int mode,
3182 u64 data)
3183{
3184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3185
3186 return dd->send_egress_err_status_cnt[61];
3187}
3188
3189static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3190 void *context, int vl,
3191 int mode, u64 data)
3192{
3193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3194
3195 return dd->send_egress_err_status_cnt[60];
3196}
3197
3198static u64 access_tx_read_sdma_memory_cor_err_cnt(
3199 const struct cntr_entry *entry,
3200 void *context, int vl, int mode, u64 data)
3201{
3202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3203
3204 return dd->send_egress_err_status_cnt[59];
3205}
3206
3207static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3208 void *context, int vl, int mode,
3209 u64 data)
3210{
3211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3212
3213 return dd->send_egress_err_status_cnt[58];
3214}
3215
3216static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3217 void *context, int vl, int mode,
3218 u64 data)
3219{
3220 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3221
3222 return dd->send_egress_err_status_cnt[57];
3223}
3224
3225static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3226 void *context, int vl, int mode,
3227 u64 data)
3228{
3229 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3230
3231 return dd->send_egress_err_status_cnt[56];
3232}
3233
3234static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3235 void *context, int vl, int mode,
3236 u64 data)
3237{
3238 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3239
3240 return dd->send_egress_err_status_cnt[55];
3241}
3242
3243static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3244 void *context, int vl, int mode,
3245 u64 data)
3246{
3247 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3248
3249 return dd->send_egress_err_status_cnt[54];
3250}
3251
3252static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3253 void *context, int vl, int mode,
3254 u64 data)
3255{
3256 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3257
3258 return dd->send_egress_err_status_cnt[53];
3259}
3260
3261static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3262 void *context, int vl, int mode,
3263 u64 data)
3264{
3265 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3266
3267 return dd->send_egress_err_status_cnt[52];
3268}
3269
3270static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3271 void *context, int vl, int mode,
3272 u64 data)
3273{
3274 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3275
3276 return dd->send_egress_err_status_cnt[51];
3277}
3278
3279static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3280 void *context, int vl, int mode,
3281 u64 data)
3282{
3283 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3284
3285 return dd->send_egress_err_status_cnt[50];
3286}
3287
3288static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3289 void *context, int vl, int mode,
3290 u64 data)
3291{
3292 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3293
3294 return dd->send_egress_err_status_cnt[49];
3295}
3296
3297static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3298 void *context, int vl, int mode,
3299 u64 data)
3300{
3301 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3302
3303 return dd->send_egress_err_status_cnt[48];
3304}
3305
3306static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3307 void *context, int vl, int mode,
3308 u64 data)
3309{
3310 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3311
3312 return dd->send_egress_err_status_cnt[47];
3313}
3314
3315static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3316 void *context, int vl, int mode,
3317 u64 data)
3318{
3319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3320
3321 return dd->send_egress_err_status_cnt[46];
3322}
3323
3324static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3325 void *context, int vl, int mode,
3326 u64 data)
3327{
3328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3329
3330 return dd->send_egress_err_status_cnt[45];
3331}
3332
3333static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3334 void *context, int vl,
3335 int mode, u64 data)
3336{
3337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3338
3339 return dd->send_egress_err_status_cnt[44];
3340}
3341
3342static u64 access_tx_read_sdma_memory_unc_err_cnt(
3343 const struct cntr_entry *entry,
3344 void *context, int vl, int mode, u64 data)
3345{
3346 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3347
3348 return dd->send_egress_err_status_cnt[43];
3349}
3350
3351static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3352 void *context, int vl, int mode,
3353 u64 data)
3354{
3355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3356
3357 return dd->send_egress_err_status_cnt[42];
3358}
3359
3360static u64 access_tx_credit_return_partiy_err_cnt(
3361 const struct cntr_entry *entry,
3362 void *context, int vl, int mode, u64 data)
3363{
3364 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3365
3366 return dd->send_egress_err_status_cnt[41];
3367}
3368
3369static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3370 const struct cntr_entry *entry,
3371 void *context, int vl, int mode, u64 data)
3372{
3373 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3374
3375 return dd->send_egress_err_status_cnt[40];
3376}
3377
3378static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3379 const struct cntr_entry *entry,
3380 void *context, int vl, int mode, u64 data)
3381{
3382 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3383
3384 return dd->send_egress_err_status_cnt[39];
3385}
3386
3387static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3388 const struct cntr_entry *entry,
3389 void *context, int vl, int mode, u64 data)
3390{
3391 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3392
3393 return dd->send_egress_err_status_cnt[38];
3394}
3395
3396static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3397 const struct cntr_entry *entry,
3398 void *context, int vl, int mode, u64 data)
3399{
3400 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3401
3402 return dd->send_egress_err_status_cnt[37];
3403}
3404
3405static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3406 const struct cntr_entry *entry,
3407 void *context, int vl, int mode, u64 data)
3408{
3409 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3410
3411 return dd->send_egress_err_status_cnt[36];
3412}
3413
3414static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3415 const struct cntr_entry *entry,
3416 void *context, int vl, int mode, u64 data)
3417{
3418 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3419
3420 return dd->send_egress_err_status_cnt[35];
3421}
3422
3423static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3424 const struct cntr_entry *entry,
3425 void *context, int vl, int mode, u64 data)
3426{
3427 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3428
3429 return dd->send_egress_err_status_cnt[34];
3430}
3431
3432static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3433 const struct cntr_entry *entry,
3434 void *context, int vl, int mode, u64 data)
3435{
3436 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3437
3438 return dd->send_egress_err_status_cnt[33];
3439}
3440
3441static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3442 const struct cntr_entry *entry,
3443 void *context, int vl, int mode, u64 data)
3444{
3445 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3446
3447 return dd->send_egress_err_status_cnt[32];
3448}
3449
3450static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3451 const struct cntr_entry *entry,
3452 void *context, int vl, int mode, u64 data)
3453{
3454 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3455
3456 return dd->send_egress_err_status_cnt[31];
3457}
3458
3459static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3460 const struct cntr_entry *entry,
3461 void *context, int vl, int mode, u64 data)
3462{
3463 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3464
3465 return dd->send_egress_err_status_cnt[30];
3466}
3467
3468static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3469 const struct cntr_entry *entry,
3470 void *context, int vl, int mode, u64 data)
3471{
3472 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3473
3474 return dd->send_egress_err_status_cnt[29];
3475}
3476
3477static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3478 const struct cntr_entry *entry,
3479 void *context, int vl, int mode, u64 data)
3480{
3481 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3482
3483 return dd->send_egress_err_status_cnt[28];
3484}
3485
3486static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3487 const struct cntr_entry *entry,
3488 void *context, int vl, int mode, u64 data)
3489{
3490 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3491
3492 return dd->send_egress_err_status_cnt[27];
3493}
3494
3495static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3496 const struct cntr_entry *entry,
3497 void *context, int vl, int mode, u64 data)
3498{
3499 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3500
3501 return dd->send_egress_err_status_cnt[26];
3502}
3503
3504static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3505 const struct cntr_entry *entry,
3506 void *context, int vl, int mode, u64 data)
3507{
3508 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3509
3510 return dd->send_egress_err_status_cnt[25];
3511}
3512
3513static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3514 const struct cntr_entry *entry,
3515 void *context, int vl, int mode, u64 data)
3516{
3517 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3518
3519 return dd->send_egress_err_status_cnt[24];
3520}
3521
3522static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3523 const struct cntr_entry *entry,
3524 void *context, int vl, int mode, u64 data)
3525{
3526 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3527
3528 return dd->send_egress_err_status_cnt[23];
3529}
3530
3531static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3532 const struct cntr_entry *entry,
3533 void *context, int vl, int mode, u64 data)
3534{
3535 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3536
3537 return dd->send_egress_err_status_cnt[22];
3538}
3539
3540static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3541 const struct cntr_entry *entry,
3542 void *context, int vl, int mode, u64 data)
3543{
3544 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3545
3546 return dd->send_egress_err_status_cnt[21];
3547}
3548
3549static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3550 const struct cntr_entry *entry,
3551 void *context, int vl, int mode, u64 data)
3552{
3553 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3554
3555 return dd->send_egress_err_status_cnt[20];
3556}
3557
3558static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3559 const struct cntr_entry *entry,
3560 void *context, int vl, int mode, u64 data)
3561{
3562 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3563
3564 return dd->send_egress_err_status_cnt[19];
3565}
3566
3567static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3568 const struct cntr_entry *entry,
3569 void *context, int vl, int mode, u64 data)
3570{
3571 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3572
3573 return dd->send_egress_err_status_cnt[18];
3574}
3575
3576static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3577 const struct cntr_entry *entry,
3578 void *context, int vl, int mode, u64 data)
3579{
3580 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3581
3582 return dd->send_egress_err_status_cnt[17];
3583}
3584
3585static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3586 const struct cntr_entry *entry,
3587 void *context, int vl, int mode, u64 data)
3588{
3589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3590
3591 return dd->send_egress_err_status_cnt[16];
3592}
3593
3594static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3595 void *context, int vl, int mode,
3596 u64 data)
3597{
3598 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3599
3600 return dd->send_egress_err_status_cnt[15];
3601}
3602
3603static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3604 void *context, int vl,
3605 int mode, u64 data)
3606{
3607 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3608
3609 return dd->send_egress_err_status_cnt[14];
3610}
3611
3612static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3613 void *context, int vl, int mode,
3614 u64 data)
3615{
3616 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3617
3618 return dd->send_egress_err_status_cnt[13];
3619}
3620
3621static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3622 void *context, int vl, int mode,
3623 u64 data)
3624{
3625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3626
3627 return dd->send_egress_err_status_cnt[12];
3628}
3629
3630static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3631 const struct cntr_entry *entry,
3632 void *context, int vl, int mode, u64 data)
3633{
3634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3635
3636 return dd->send_egress_err_status_cnt[11];
3637}
3638
3639static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3640 void *context, int vl, int mode,
3641 u64 data)
3642{
3643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3644
3645 return dd->send_egress_err_status_cnt[10];
3646}
3647
3648static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3649 void *context, int vl, int mode,
3650 u64 data)
3651{
3652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3653
3654 return dd->send_egress_err_status_cnt[9];
3655}
3656
3657static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3658 const struct cntr_entry *entry,
3659 void *context, int vl, int mode, u64 data)
3660{
3661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3662
3663 return dd->send_egress_err_status_cnt[8];
3664}
3665
3666static u64 access_tx_pio_launch_intf_parity_err_cnt(
3667 const struct cntr_entry *entry,
3668 void *context, int vl, int mode, u64 data)
3669{
3670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3671
3672 return dd->send_egress_err_status_cnt[7];
3673}
3674
3675static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3676 void *context, int vl, int mode,
3677 u64 data)
3678{
3679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3680
3681 return dd->send_egress_err_status_cnt[6];
3682}
3683
3684static u64 access_tx_incorrect_link_state_err_cnt(
3685 const struct cntr_entry *entry,
3686 void *context, int vl, int mode, u64 data)
3687{
3688 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3689
3690 return dd->send_egress_err_status_cnt[5];
3691}
3692
3693static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3694 void *context, int vl, int mode,
3695 u64 data)
3696{
3697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3698
3699 return dd->send_egress_err_status_cnt[4];
3700}
3701
3702static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3703 const struct cntr_entry *entry,
3704 void *context, int vl, int mode, u64 data)
3705{
3706 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3707
3708 return dd->send_egress_err_status_cnt[3];
3709}
3710
3711static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3712 void *context, int vl, int mode,
3713 u64 data)
3714{
3715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3716
3717 return dd->send_egress_err_status_cnt[2];
3718}
3719
3720static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3721 const struct cntr_entry *entry,
3722 void *context, int vl, int mode, u64 data)
3723{
3724 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3725
3726 return dd->send_egress_err_status_cnt[1];
3727}
3728
3729static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3730 const struct cntr_entry *entry,
3731 void *context, int vl, int mode, u64 data)
3732{
3733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3734
3735 return dd->send_egress_err_status_cnt[0];
3736}
3737
3738/*
3739 * Software counters corresponding to each of the
3740 * error status bits within SendErrStatus
3741 */
3742static u64 access_send_csr_write_bad_addr_err_cnt(
3743 const struct cntr_entry *entry,
3744 void *context, int vl, int mode, u64 data)
3745{
3746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3747
3748 return dd->send_err_status_cnt[2];
3749}
3750
3751static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3752 void *context, int vl,
3753 int mode, u64 data)
3754{
3755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3756
3757 return dd->send_err_status_cnt[1];
3758}
3759
3760static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3761 void *context, int vl, int mode,
3762 u64 data)
3763{
3764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3765
3766 return dd->send_err_status_cnt[0];
3767}
3768
3769/*
3770 * Software counters corresponding to each of the
3771 * error status bits within SendCtxtErrStatus
3772 */
3773static u64 access_pio_write_out_of_bounds_err_cnt(
3774 const struct cntr_entry *entry,
3775 void *context, int vl, int mode, u64 data)
3776{
3777 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3778
3779 return dd->sw_ctxt_err_status_cnt[4];
3780}
3781
3782static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3783 void *context, int vl, int mode,
3784 u64 data)
3785{
3786 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3787
3788 return dd->sw_ctxt_err_status_cnt[3];
3789}
3790
3791static u64 access_pio_write_crosses_boundary_err_cnt(
3792 const struct cntr_entry *entry,
3793 void *context, int vl, int mode, u64 data)
3794{
3795 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3796
3797 return dd->sw_ctxt_err_status_cnt[2];
3798}
3799
3800static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3801 void *context, int vl,
3802 int mode, u64 data)
3803{
3804 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3805
3806 return dd->sw_ctxt_err_status_cnt[1];
3807}
3808
3809static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3810 void *context, int vl, int mode,
3811 u64 data)
3812{
3813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3814
3815 return dd->sw_ctxt_err_status_cnt[0];
3816}
3817
3818/*
3819 * Software counters corresponding to each of the
3820 * error status bits within SendDmaEngErrStatus
3821 */
3822static u64 access_sdma_header_request_fifo_cor_err_cnt(
3823 const struct cntr_entry *entry,
3824 void *context, int vl, int mode, u64 data)
3825{
3826 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3827
3828 return dd->sw_send_dma_eng_err_status_cnt[23];
3829}
3830
3831static u64 access_sdma_header_storage_cor_err_cnt(
3832 const struct cntr_entry *entry,
3833 void *context, int vl, int mode, u64 data)
3834{
3835 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3836
3837 return dd->sw_send_dma_eng_err_status_cnt[22];
3838}
3839
3840static u64 access_sdma_packet_tracking_cor_err_cnt(
3841 const struct cntr_entry *entry,
3842 void *context, int vl, int mode, u64 data)
3843{
3844 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3845
3846 return dd->sw_send_dma_eng_err_status_cnt[21];
3847}
3848
3849static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3850 void *context, int vl, int mode,
3851 u64 data)
3852{
3853 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3854
3855 return dd->sw_send_dma_eng_err_status_cnt[20];
3856}
3857
3858static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3859 void *context, int vl, int mode,
3860 u64 data)
3861{
3862 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3863
3864 return dd->sw_send_dma_eng_err_status_cnt[19];
3865}
3866
3867static u64 access_sdma_header_request_fifo_unc_err_cnt(
3868 const struct cntr_entry *entry,
3869 void *context, int vl, int mode, u64 data)
3870{
3871 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3872
3873 return dd->sw_send_dma_eng_err_status_cnt[18];
3874}
3875
3876static u64 access_sdma_header_storage_unc_err_cnt(
3877 const struct cntr_entry *entry,
3878 void *context, int vl, int mode, u64 data)
3879{
3880 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3881
3882 return dd->sw_send_dma_eng_err_status_cnt[17];
3883}
3884
3885static u64 access_sdma_packet_tracking_unc_err_cnt(
3886 const struct cntr_entry *entry,
3887 void *context, int vl, int mode, u64 data)
3888{
3889 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3890
3891 return dd->sw_send_dma_eng_err_status_cnt[16];
3892}
3893
3894static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3895 void *context, int vl, int mode,
3896 u64 data)
3897{
3898 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3899
3900 return dd->sw_send_dma_eng_err_status_cnt[15];
3901}
3902
3903static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3904 void *context, int vl, int mode,
3905 u64 data)
3906{
3907 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3908
3909 return dd->sw_send_dma_eng_err_status_cnt[14];
3910}
3911
3912static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3913 void *context, int vl, int mode,
3914 u64 data)
3915{
3916 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3917
3918 return dd->sw_send_dma_eng_err_status_cnt[13];
3919}
3920
3921static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3922 void *context, int vl, int mode,
3923 u64 data)
3924{
3925 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3926
3927 return dd->sw_send_dma_eng_err_status_cnt[12];
3928}
3929
3930static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3931 void *context, int vl, int mode,
3932 u64 data)
3933{
3934 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3935
3936 return dd->sw_send_dma_eng_err_status_cnt[11];
3937}
3938
3939static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3940 void *context, int vl, int mode,
3941 u64 data)
3942{
3943 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3944
3945 return dd->sw_send_dma_eng_err_status_cnt[10];
3946}
3947
3948static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3949 void *context, int vl, int mode,
3950 u64 data)
3951{
3952 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3953
3954 return dd->sw_send_dma_eng_err_status_cnt[9];
3955}
3956
3957static u64 access_sdma_packet_desc_overflow_err_cnt(
3958 const struct cntr_entry *entry,
3959 void *context, int vl, int mode, u64 data)
3960{
3961 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3962
3963 return dd->sw_send_dma_eng_err_status_cnt[8];
3964}
3965
3966static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3967 void *context, int vl,
3968 int mode, u64 data)
3969{
3970 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3971
3972 return dd->sw_send_dma_eng_err_status_cnt[7];
3973}
3974
3975static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3976 void *context, int vl, int mode, u64 data)
3977{
3978 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3979
3980 return dd->sw_send_dma_eng_err_status_cnt[6];
3981}
3982
3983static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3984 void *context, int vl, int mode,
3985 u64 data)
3986{
3987 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3988
3989 return dd->sw_send_dma_eng_err_status_cnt[5];
3990}
3991
3992static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3993 void *context, int vl, int mode,
3994 u64 data)
3995{
3996 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3997
3998 return dd->sw_send_dma_eng_err_status_cnt[4];
3999}
4000
4001static u64 access_sdma_tail_out_of_bounds_err_cnt(
4002 const struct cntr_entry *entry,
4003 void *context, int vl, int mode, u64 data)
4004{
4005 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4006
4007 return dd->sw_send_dma_eng_err_status_cnt[3];
4008}
4009
4010static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4011 void *context, int vl, int mode,
4012 u64 data)
4013{
4014 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4015
4016 return dd->sw_send_dma_eng_err_status_cnt[2];
4017}
4018
4019static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4020 void *context, int vl, int mode,
4021 u64 data)
4022{
4023 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4024
4025 return dd->sw_send_dma_eng_err_status_cnt[1];
4026}
4027
4028static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4029 void *context, int vl, int mode,
4030 u64 data)
4031{
4032 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4033
4034 return dd->sw_send_dma_eng_err_status_cnt[0];
4035}
4036
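/*
 * DC receive error counter: reads combine the hardware CSR value with
 * the software-tracked bypass packet error count, saturating at
 * CNTR_MAX; a write clears the software portion.
 */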
4037static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4038 void *context, int vl, int mode,
4039 u64 data)
4040{
4041 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4042
4043 u64 val = 0;
4044 u64 csr = entry->csr;
4045
4046 val = read_write_csr(dd, csr, mode, data);
4047 if (mode == CNTR_MODE_R) {
4048 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4049 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4050 } else if (mode == CNTR_MODE_W) {
4051 dd->sw_rcv_bypass_packet_errors = 0;
4052 } else {
4053		dd_dev_err(dd, "Invalid cntr register access mode\n");
4054 return 0;
4055 }
4056 return val;
4057}
4058
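/*
 * Generate an accessor for a per-CPU software counter kept in
 * ppd->ibport_data.rvp (e.g. rc_acks); reads and clears go through
 * read_write_cpu() using the matching z_<cntr> zero reference.
 */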
4059#define def_access_sw_cpu(cntr) \
4060static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
4061 void *context, int vl, int mode, u64 data) \
4062{ \
4063 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
4064	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
4065 ppd->ibport_data.rvp.cntr, vl, \
4066			      mode, data); \
4067}
4068
4069def_access_sw_cpu(rc_acks);
4070def_access_sw_cpu(rc_qacks);
4071def_access_sw_cpu(rc_delayed_comp);
4072
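/*
 * Generate an accessor for a per-port ibport software counter
 * (ppd->ibport_data.rvp.n_<cntr>). These counters are not tracked
 * per VL, so any per-VL query simply returns 0.
 */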
4073#define def_access_ibp_counter(cntr) \
4074static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
4075 void *context, int vl, int mode, u64 data) \
4076{ \
4077 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
4078 \
4079 if (vl != CNTR_INVALID_VL) \
4080 return 0; \
4081 \
4082	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
4083			     mode, data); \
4084}
4085
4086def_access_ibp_counter(loop_pkts);
4087def_access_ibp_counter(rc_resends);
4088def_access_ibp_counter(rnr_naks);
4089def_access_ibp_counter(other_naks);
4090def_access_ibp_counter(rc_timeouts);
4091def_access_ibp_counter(pkt_drops);
4092def_access_ibp_counter(dmawait);
4093def_access_ibp_counter(rc_seqnak);
4094def_access_ibp_counter(rc_dupreq);
4095def_access_ibp_counter(rdma_seq);
4096def_access_ibp_counter(unaligned);
4097def_access_ibp_counter(seq_naks);
4098
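/*
 * Device counter table, indexed by the C_* device counter enums.
 * Each entry names the counter, the CSR (if any) behind it, and the
 * CNTR_* flags and accessor used to read or clear it.
 */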
4099static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4100[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4101[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4102 CNTR_NORMAL),
4103[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4104 CNTR_NORMAL),
4105[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4106 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4107 CNTR_NORMAL),
4108[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4109 CNTR_NORMAL),
4110[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4111 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4112[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4113 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4114[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4115 CNTR_NORMAL),
4116[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4117 CNTR_NORMAL),
4118[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4119 CNTR_NORMAL),
4120[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4121 CNTR_NORMAL),
4122[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4123 CNTR_NORMAL),
4124[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4125 CNTR_NORMAL),
4126[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4127 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4128[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4129 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4130[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4131 CNTR_SYNTH),
4132[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4133 access_dc_rcv_err_cnt),
4134[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4135 CNTR_SYNTH),
4136[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4137 CNTR_SYNTH),
4138[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4139 CNTR_SYNTH),
4140[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4141 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4142[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4143 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4144 CNTR_SYNTH),
4145[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4146 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4147[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4148 CNTR_SYNTH),
4149[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4150 CNTR_SYNTH),
4151[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4152 CNTR_SYNTH),
4153[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4154 CNTR_SYNTH),
4155[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4156 CNTR_SYNTH),
4157[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4158 CNTR_SYNTH),
4159[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4160 CNTR_SYNTH),
4161[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4162 CNTR_SYNTH | CNTR_VL),
4163[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4164 CNTR_SYNTH | CNTR_VL),
4165[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4166[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4167 CNTR_SYNTH | CNTR_VL),
4168[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4169[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4170 CNTR_SYNTH | CNTR_VL),
4171[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4172 CNTR_SYNTH),
4173[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4174 CNTR_SYNTH | CNTR_VL),
4175[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4176 CNTR_SYNTH),
4177[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4178 CNTR_SYNTH | CNTR_VL),
4179[C_DC_TOTAL_CRC] =
4180 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4181 CNTR_SYNTH),
4182[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4183 CNTR_SYNTH),
4184[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4185 CNTR_SYNTH),
4186[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4187 CNTR_SYNTH),
4188[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4189 CNTR_SYNTH),
4190[C_DC_CRC_MULT_LN] =
4191 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4192 CNTR_SYNTH),
4193[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4194 CNTR_SYNTH),
4195[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4196 CNTR_SYNTH),
4197[C_DC_SEQ_CRC_CNT] =
4198 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4199 CNTR_SYNTH),
4200[C_DC_ESC0_ONLY_CNT] =
4201 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4202 CNTR_SYNTH),
4203[C_DC_ESC0_PLUS1_CNT] =
4204 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4205 CNTR_SYNTH),
4206[C_DC_ESC0_PLUS2_CNT] =
4207 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4208 CNTR_SYNTH),
4209[C_DC_REINIT_FROM_PEER_CNT] =
4210 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4211 CNTR_SYNTH),
4212[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4213 CNTR_SYNTH),
4214[C_DC_MISC_FLG_CNT] =
4215 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4216 CNTR_SYNTH),
4217[C_DC_PRF_GOOD_LTP_CNT] =
4218 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4219[C_DC_PRF_ACCEPTED_LTP_CNT] =
4220 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4221 CNTR_SYNTH),
4222[C_DC_PRF_RX_FLIT_CNT] =
4223 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4224[C_DC_PRF_TX_FLIT_CNT] =
4225 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4226[C_DC_PRF_CLK_CNTR] =
4227 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4228[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4229 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4230[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4231 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4232 CNTR_SYNTH),
4233[C_DC_PG_STS_TX_SBE_CNT] =
4234 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4235[C_DC_PG_STS_TX_MBE_CNT] =
4236 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4237 CNTR_SYNTH),
4238[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4239 access_sw_cpu_intr),
4240[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4241 access_sw_cpu_rcv_limit),
4242[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4243 access_sw_vtx_wait),
4244[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4245 access_sw_pio_wait),
4246[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4247 access_sw_pio_drain),
4248[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4249 access_sw_kmem_wait),
4250[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4251 access_sw_send_schedule),
4252[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4253 SEND_DMA_DESC_FETCHED_CNT, 0,
4254 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4255 dev_access_u32_csr),
4256[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4257 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4258 access_sde_int_cnt),
4259[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4260 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4261 access_sde_err_cnt),
4262[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4263 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4264 access_sde_idle_int_cnt),
4265[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4266 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4267 access_sde_progress_int_cnt),
4268/* MISC_ERR_STATUS */
4269[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4270 CNTR_NORMAL,
4271 access_misc_pll_lock_fail_err_cnt),
4272[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4273 CNTR_NORMAL,
4274 access_misc_mbist_fail_err_cnt),
4275[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4276 CNTR_NORMAL,
4277 access_misc_invalid_eep_cmd_err_cnt),
4278[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4279 CNTR_NORMAL,
4280 access_misc_efuse_done_parity_err_cnt),
4281[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4282 CNTR_NORMAL,
4283 access_misc_efuse_write_err_cnt),
4284[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4285 0, CNTR_NORMAL,
4286 access_misc_efuse_read_bad_addr_err_cnt),
4287[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4288 CNTR_NORMAL,
4289 access_misc_efuse_csr_parity_err_cnt),
4290[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4291 CNTR_NORMAL,
4292 access_misc_fw_auth_failed_err_cnt),
4293[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4294 CNTR_NORMAL,
4295 access_misc_key_mismatch_err_cnt),
4296[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4297 CNTR_NORMAL,
4298 access_misc_sbus_write_failed_err_cnt),
4299[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4300 CNTR_NORMAL,
4301 access_misc_csr_write_bad_addr_err_cnt),
4302[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4303 CNTR_NORMAL,
4304 access_misc_csr_read_bad_addr_err_cnt),
4305[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4306 CNTR_NORMAL,
4307 access_misc_csr_parity_err_cnt),
4308/* CceErrStatus */
4309[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4310 CNTR_NORMAL,
4311 access_sw_cce_err_status_aggregated_cnt),
4312[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4313 CNTR_NORMAL,
4314 access_cce_msix_csr_parity_err_cnt),
4315[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4316 CNTR_NORMAL,
4317 access_cce_int_map_unc_err_cnt),
4318[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4319 CNTR_NORMAL,
4320 access_cce_int_map_cor_err_cnt),
4321[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4322 CNTR_NORMAL,
4323 access_cce_msix_table_unc_err_cnt),
4324[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4325 CNTR_NORMAL,
4326 access_cce_msix_table_cor_err_cnt),
4327[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4328 0, CNTR_NORMAL,
4329 access_cce_rxdma_conv_fifo_parity_err_cnt),
4330[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4331 0, CNTR_NORMAL,
4332 access_cce_rcpl_async_fifo_parity_err_cnt),
4333[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4334 CNTR_NORMAL,
4335 access_cce_seg_write_bad_addr_err_cnt),
4336[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4337 CNTR_NORMAL,
4338 access_cce_seg_read_bad_addr_err_cnt),
4339[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4340 CNTR_NORMAL,
4341 access_la_triggered_cnt),
4342[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4343 CNTR_NORMAL,
4344 access_cce_trgt_cpl_timeout_err_cnt),
4345[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4346 CNTR_NORMAL,
4347 access_pcic_receive_parity_err_cnt),
4348[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4349 CNTR_NORMAL,
4350 access_pcic_transmit_back_parity_err_cnt),
4351[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4352 0, CNTR_NORMAL,
4353 access_pcic_transmit_front_parity_err_cnt),
4354[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4355 CNTR_NORMAL,
4356 access_pcic_cpl_dat_q_unc_err_cnt),
4357[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4358 CNTR_NORMAL,
4359 access_pcic_cpl_hd_q_unc_err_cnt),
4360[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4361 CNTR_NORMAL,
4362 access_pcic_post_dat_q_unc_err_cnt),
4363[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4364 CNTR_NORMAL,
4365 access_pcic_post_hd_q_unc_err_cnt),
4366[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4367 CNTR_NORMAL,
4368 access_pcic_retry_sot_mem_unc_err_cnt),
4369[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4370 CNTR_NORMAL,
4371 access_pcic_retry_mem_unc_err),
4372[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4373 CNTR_NORMAL,
4374 access_pcic_n_post_dat_q_parity_err_cnt),
4375[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_pcic_n_post_h_q_parity_err_cnt),
4378[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4379 CNTR_NORMAL,
4380 access_pcic_cpl_dat_q_cor_err_cnt),
4381[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4382 CNTR_NORMAL,
4383 access_pcic_cpl_hd_q_cor_err_cnt),
4384[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4385 CNTR_NORMAL,
4386 access_pcic_post_dat_q_cor_err_cnt),
4387[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4388 CNTR_NORMAL,
4389 access_pcic_post_hd_q_cor_err_cnt),
4390[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4391 CNTR_NORMAL,
4392 access_pcic_retry_sot_mem_cor_err_cnt),
4393[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4394 CNTR_NORMAL,
4395 access_pcic_retry_mem_cor_err_cnt),
4396[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4397 "CceCli1AsyncFifoDbgParityError", 0, 0,
4398 CNTR_NORMAL,
4399 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4400[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4401 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4402 CNTR_NORMAL,
4403 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4404 ),
4405[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4406 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4407 CNTR_NORMAL,
4408 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4409[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4410 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4411 CNTR_NORMAL,
4412 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4413[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4414 0, CNTR_NORMAL,
4415 access_cce_cli2_async_fifo_parity_err_cnt),
4416[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4417 CNTR_NORMAL,
4418 access_cce_csr_cfg_bus_parity_err_cnt),
4419[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4420 0, CNTR_NORMAL,
4421 access_cce_cli0_async_fifo_parity_err_cnt),
4422[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_cce_rspd_data_parity_err_cnt),
4425[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4426 CNTR_NORMAL,
4427 access_cce_trgt_access_err_cnt),
4428[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4429 0, CNTR_NORMAL,
4430 access_cce_trgt_async_fifo_parity_err_cnt),
4431[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4432 CNTR_NORMAL,
4433 access_cce_csr_write_bad_addr_err_cnt),
4434[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4435 CNTR_NORMAL,
4436 access_cce_csr_read_bad_addr_err_cnt),
4437[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4438 CNTR_NORMAL,
4439 access_ccs_csr_parity_err_cnt),
4440
4441/* RcvErrStatus */
4442[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4443 CNTR_NORMAL,
4444 access_rx_csr_parity_err_cnt),
4445[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4446 CNTR_NORMAL,
4447 access_rx_csr_write_bad_addr_err_cnt),
4448[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4449 CNTR_NORMAL,
4450 access_rx_csr_read_bad_addr_err_cnt),
4451[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4452 CNTR_NORMAL,
4453 access_rx_dma_csr_unc_err_cnt),
4454[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4455 CNTR_NORMAL,
4456 access_rx_dma_dq_fsm_encoding_err_cnt),
4457[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4458 CNTR_NORMAL,
4459 access_rx_dma_eq_fsm_encoding_err_cnt),
4460[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4461 CNTR_NORMAL,
4462 access_rx_dma_csr_parity_err_cnt),
4463[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4464 CNTR_NORMAL,
4465 access_rx_rbuf_data_cor_err_cnt),
4466[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4467 CNTR_NORMAL,
4468 access_rx_rbuf_data_unc_err_cnt),
4469[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4470 CNTR_NORMAL,
4471 access_rx_dma_data_fifo_rd_cor_err_cnt),
4472[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4473 CNTR_NORMAL,
4474 access_rx_dma_data_fifo_rd_unc_err_cnt),
4475[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4476 CNTR_NORMAL,
4477 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4478[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4479 CNTR_NORMAL,
4480 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4481[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4482 CNTR_NORMAL,
4483 access_rx_rbuf_desc_part2_cor_err_cnt),
4484[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4485 CNTR_NORMAL,
4486 access_rx_rbuf_desc_part2_unc_err_cnt),
4487[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4488 CNTR_NORMAL,
4489 access_rx_rbuf_desc_part1_cor_err_cnt),
4490[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4491 CNTR_NORMAL,
4492 access_rx_rbuf_desc_part1_unc_err_cnt),
4493[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4494 CNTR_NORMAL,
4495 access_rx_hq_intr_fsm_err_cnt),
4496[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4497 CNTR_NORMAL,
4498 access_rx_hq_intr_csr_parity_err_cnt),
4499[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4500 CNTR_NORMAL,
4501 access_rx_lookup_csr_parity_err_cnt),
4502[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4503 CNTR_NORMAL,
4504 access_rx_lookup_rcv_array_cor_err_cnt),
4505[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4506 CNTR_NORMAL,
4507 access_rx_lookup_rcv_array_unc_err_cnt),
4508[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4509 0, CNTR_NORMAL,
4510 access_rx_lookup_des_part2_parity_err_cnt),
4511[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4512 0, CNTR_NORMAL,
4513 access_rx_lookup_des_part1_unc_cor_err_cnt),
4514[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4515 CNTR_NORMAL,
4516 access_rx_lookup_des_part1_unc_err_cnt),
4517[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4518 CNTR_NORMAL,
4519 access_rx_rbuf_next_free_buf_cor_err_cnt),
4520[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4521 CNTR_NORMAL,
4522 access_rx_rbuf_next_free_buf_unc_err_cnt),
4523[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4524 "RxRbufFlInitWrAddrParityErr", 0, 0,
4525 CNTR_NORMAL,
4526 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4527[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4528 0, CNTR_NORMAL,
4529 access_rx_rbuf_fl_initdone_parity_err_cnt),
4530[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4531 0, CNTR_NORMAL,
4532 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4533[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4534 CNTR_NORMAL,
4535 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4536[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4537 CNTR_NORMAL,
4538 access_rx_rbuf_empty_err_cnt),
4539[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4540 CNTR_NORMAL,
4541 access_rx_rbuf_full_err_cnt),
4542[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4543 CNTR_NORMAL,
4544 access_rbuf_bad_lookup_err_cnt),
4545[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4546 CNTR_NORMAL,
4547 access_rbuf_ctx_id_parity_err_cnt),
4548[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4549 CNTR_NORMAL,
4550 access_rbuf_csr_qeopdw_parity_err_cnt),
4551[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4552 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4553 CNTR_NORMAL,
4554 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4555[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4556 "RxRbufCsrQTlPtrParityErr", 0, 0,
4557 CNTR_NORMAL,
4558 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4559[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4560 0, CNTR_NORMAL,
4561 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4562[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4563 0, CNTR_NORMAL,
4564 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4565[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4566 0, 0, CNTR_NORMAL,
4567 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4568[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4569 0, CNTR_NORMAL,
4570 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4571[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4572 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4573 CNTR_NORMAL,
4574 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4575[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4576 0, CNTR_NORMAL,
4577 access_rx_rbuf_block_list_read_cor_err_cnt),
4578[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4579 0, CNTR_NORMAL,
4580 access_rx_rbuf_block_list_read_unc_err_cnt),
4581[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4582 CNTR_NORMAL,
4583 access_rx_rbuf_lookup_des_cor_err_cnt),
4584[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4585 CNTR_NORMAL,
4586 access_rx_rbuf_lookup_des_unc_err_cnt),
4587[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4588 "RxRbufLookupDesRegUncCorErr", 0, 0,
4589 CNTR_NORMAL,
4590 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4591[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4592 CNTR_NORMAL,
4593 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4594[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4595 CNTR_NORMAL,
4596 access_rx_rbuf_free_list_cor_err_cnt),
4597[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4598 CNTR_NORMAL,
4599 access_rx_rbuf_free_list_unc_err_cnt),
4600[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4601 CNTR_NORMAL,
4602 access_rx_rcv_fsm_encoding_err_cnt),
4603[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4604 CNTR_NORMAL,
4605 access_rx_dma_flag_cor_err_cnt),
4606[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4607 CNTR_NORMAL,
4608 access_rx_dma_flag_unc_err_cnt),
4609[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4610 CNTR_NORMAL,
4611 access_rx_dc_sop_eop_parity_err_cnt),
4612[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4613 CNTR_NORMAL,
4614 access_rx_rcv_csr_parity_err_cnt),
4615[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4616 CNTR_NORMAL,
4617 access_rx_rcv_qp_map_table_cor_err_cnt),
4618[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4619 CNTR_NORMAL,
4620 access_rx_rcv_qp_map_table_unc_err_cnt),
4621[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4622 CNTR_NORMAL,
4623 access_rx_rcv_data_cor_err_cnt),
4624[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4625 CNTR_NORMAL,
4626 access_rx_rcv_data_unc_err_cnt),
4627[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4628 CNTR_NORMAL,
4629 access_rx_rcv_hdr_cor_err_cnt),
4630[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4631 CNTR_NORMAL,
4632 access_rx_rcv_hdr_unc_err_cnt),
4633[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4634 CNTR_NORMAL,
4635 access_rx_dc_intf_parity_err_cnt),
4636[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4637 CNTR_NORMAL,
4638 access_rx_dma_csr_cor_err_cnt),
4639/* SendPioErrStatus */
4640[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4641 CNTR_NORMAL,
4642 access_pio_pec_sop_head_parity_err_cnt),
4643[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4644 CNTR_NORMAL,
4645 access_pio_pcc_sop_head_parity_err_cnt),
4646[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4647 0, 0, CNTR_NORMAL,
4648 access_pio_last_returned_cnt_parity_err_cnt),
4649[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4650 0, CNTR_NORMAL,
4651 access_pio_current_free_cnt_parity_err_cnt),
4652[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4653 CNTR_NORMAL,
4654 access_pio_reserved_31_err_cnt),
4655[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4656 CNTR_NORMAL,
4657 access_pio_reserved_30_err_cnt),
4658[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4659 CNTR_NORMAL,
4660 access_pio_ppmc_sop_len_err_cnt),
4661[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4662 CNTR_NORMAL,
4663 access_pio_ppmc_bqc_mem_parity_err_cnt),
4664[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4665 CNTR_NORMAL,
4666 access_pio_vl_fifo_parity_err_cnt),
4667[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4668 CNTR_NORMAL,
4669 access_pio_vlf_sop_parity_err_cnt),
4670[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4671 CNTR_NORMAL,
4672 access_pio_vlf_v1_len_parity_err_cnt),
4673[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4674 CNTR_NORMAL,
4675 access_pio_block_qw_count_parity_err_cnt),
4676[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4677 CNTR_NORMAL,
4678 access_pio_write_qw_valid_parity_err_cnt),
4679[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4680 CNTR_NORMAL,
4681 access_pio_state_machine_err_cnt),
4682[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4683 CNTR_NORMAL,
4684 access_pio_write_data_parity_err_cnt),
4685[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4686 CNTR_NORMAL,
4687 access_pio_host_addr_mem_cor_err_cnt),
4688[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4689 CNTR_NORMAL,
4690 access_pio_host_addr_mem_unc_err_cnt),
4691[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4692 CNTR_NORMAL,
4693 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4694[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4695 CNTR_NORMAL,
4696 access_pio_init_sm_in_err_cnt),
4697[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4698 CNTR_NORMAL,
4699 access_pio_ppmc_pbl_fifo_err_cnt),
4700[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4701 0, CNTR_NORMAL,
4702 access_pio_credit_ret_fifo_parity_err_cnt),
4703[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4704 CNTR_NORMAL,
4705 access_pio_v1_len_mem_bank1_cor_err_cnt),
4706[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4707 CNTR_NORMAL,
4708 access_pio_v1_len_mem_bank0_cor_err_cnt),
4709[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4710 CNTR_NORMAL,
4711 access_pio_v1_len_mem_bank1_unc_err_cnt),
4712[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4713 CNTR_NORMAL,
4714 access_pio_v1_len_mem_bank0_unc_err_cnt),
4715[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4716 CNTR_NORMAL,
4717 access_pio_sm_pkt_reset_parity_err_cnt),
4718[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4719 CNTR_NORMAL,
4720 access_pio_pkt_evict_fifo_parity_err_cnt),
4721[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4722 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4723 CNTR_NORMAL,
4724 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4725[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4726 CNTR_NORMAL,
4727 access_pio_sbrdctl_crrel_parity_err_cnt),
4728[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4729 CNTR_NORMAL,
4730 access_pio_pec_fifo_parity_err_cnt),
4731[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4732 CNTR_NORMAL,
4733 access_pio_pcc_fifo_parity_err_cnt),
4734[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4735 CNTR_NORMAL,
4736 access_pio_sb_mem_fifo1_err_cnt),
4737[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4738 CNTR_NORMAL,
4739 access_pio_sb_mem_fifo0_err_cnt),
4740[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4741 CNTR_NORMAL,
4742 access_pio_csr_parity_err_cnt),
4743[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4744 CNTR_NORMAL,
4745 access_pio_write_addr_parity_err_cnt),
4746[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4747 CNTR_NORMAL,
4748 access_pio_write_bad_ctxt_err_cnt),
4749/* SendDmaErrStatus */
4750[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4751 0, CNTR_NORMAL,
4752 access_sdma_pcie_req_tracking_cor_err_cnt),
4753[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4754 0, CNTR_NORMAL,
4755 access_sdma_pcie_req_tracking_unc_err_cnt),
4756[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4757 CNTR_NORMAL,
4758 access_sdma_csr_parity_err_cnt),
4759[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4760 CNTR_NORMAL,
4761 access_sdma_rpy_tag_err_cnt),
4762/* SendEgressErrStatus */
4763[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4764 CNTR_NORMAL,
4765 access_tx_read_pio_memory_csr_unc_err_cnt),
4766[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4767 0, CNTR_NORMAL,
4768 access_tx_read_sdma_memory_csr_err_cnt),
4769[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4770 CNTR_NORMAL,
4771 access_tx_egress_fifo_cor_err_cnt),
4772[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4773 CNTR_NORMAL,
4774 access_tx_read_pio_memory_cor_err_cnt),
4775[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4776 CNTR_NORMAL,
4777 access_tx_read_sdma_memory_cor_err_cnt),
4778[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4779 CNTR_NORMAL,
4780 access_tx_sb_hdr_cor_err_cnt),
4781[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4782 CNTR_NORMAL,
4783 access_tx_credit_overrun_err_cnt),
4784[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4785 CNTR_NORMAL,
4786 access_tx_launch_fifo8_cor_err_cnt),
4787[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4788 CNTR_NORMAL,
4789 access_tx_launch_fifo7_cor_err_cnt),
4790[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4791 CNTR_NORMAL,
4792 access_tx_launch_fifo6_cor_err_cnt),
4793[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4794 CNTR_NORMAL,
4795 access_tx_launch_fifo5_cor_err_cnt),
4796[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4797 CNTR_NORMAL,
4798 access_tx_launch_fifo4_cor_err_cnt),
4799[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4800 CNTR_NORMAL,
4801 access_tx_launch_fifo3_cor_err_cnt),
4802[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4803 CNTR_NORMAL,
4804 access_tx_launch_fifo2_cor_err_cnt),
4805[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4806 CNTR_NORMAL,
4807 access_tx_launch_fifo1_cor_err_cnt),
4808[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4809 CNTR_NORMAL,
4810 access_tx_launch_fifo0_cor_err_cnt),
4811[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4812 CNTR_NORMAL,
4813 access_tx_credit_return_vl_err_cnt),
4814[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4815 CNTR_NORMAL,
4816 access_tx_hcrc_insertion_err_cnt),
4817[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4818 CNTR_NORMAL,
4819 access_tx_egress_fifo_unc_err_cnt),
4820[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4821 CNTR_NORMAL,
4822 access_tx_read_pio_memory_unc_err_cnt),
4823[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4824 CNTR_NORMAL,
4825 access_tx_read_sdma_memory_unc_err_cnt),
4826[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4827 CNTR_NORMAL,
4828 access_tx_sb_hdr_unc_err_cnt),
4829[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4830 CNTR_NORMAL,
4831 access_tx_credit_return_partiy_err_cnt),
4832[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4833 0, 0, CNTR_NORMAL,
4834 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4835[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4836 0, 0, CNTR_NORMAL,
4837 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4838[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4839 0, 0, CNTR_NORMAL,
4840 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4841[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4842 0, 0, CNTR_NORMAL,
4843 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4844[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4845 0, 0, CNTR_NORMAL,
4846 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4847[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4848 0, 0, CNTR_NORMAL,
4849 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4850[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4851 0, 0, CNTR_NORMAL,
4852 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4853[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4854 0, 0, CNTR_NORMAL,
4855 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4856[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4857 0, 0, CNTR_NORMAL,
4858 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4859[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4860 0, 0, CNTR_NORMAL,
4861 access_tx_sdma15_disallowed_packet_err_cnt),
4862[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4863 0, 0, CNTR_NORMAL,
4864 access_tx_sdma14_disallowed_packet_err_cnt),
4865[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4866 0, 0, CNTR_NORMAL,
4867 access_tx_sdma13_disallowed_packet_err_cnt),
4868[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4869 0, 0, CNTR_NORMAL,
4870 access_tx_sdma12_disallowed_packet_err_cnt),
4871[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4872 0, 0, CNTR_NORMAL,
4873 access_tx_sdma11_disallowed_packet_err_cnt),
4874[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4875 0, 0, CNTR_NORMAL,
4876 access_tx_sdma10_disallowed_packet_err_cnt),
4877[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4878 0, 0, CNTR_NORMAL,
4879 access_tx_sdma9_disallowed_packet_err_cnt),
4880[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4881 0, 0, CNTR_NORMAL,
4882 access_tx_sdma8_disallowed_packet_err_cnt),
4883[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4884 0, 0, CNTR_NORMAL,
4885 access_tx_sdma7_disallowed_packet_err_cnt),
4886[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4887 0, 0, CNTR_NORMAL,
4888 access_tx_sdma6_disallowed_packet_err_cnt),
4889[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4890 0, 0, CNTR_NORMAL,
4891 access_tx_sdma5_disallowed_packet_err_cnt),
4892[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4893 0, 0, CNTR_NORMAL,
4894 access_tx_sdma4_disallowed_packet_err_cnt),
4895[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4896 0, 0, CNTR_NORMAL,
4897 access_tx_sdma3_disallowed_packet_err_cnt),
4898[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4899 0, 0, CNTR_NORMAL,
4900 access_tx_sdma2_disallowed_packet_err_cnt),
4901[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4902 0, 0, CNTR_NORMAL,
4903 access_tx_sdma1_disallowed_packet_err_cnt),
4904[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4905 0, 0, CNTR_NORMAL,
4906 access_tx_sdma0_disallowed_packet_err_cnt),
4907[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4908 CNTR_NORMAL,
4909 access_tx_config_parity_err_cnt),
4910[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4911 CNTR_NORMAL,
4912 access_tx_sbrd_ctl_csr_parity_err_cnt),
4913[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4914 CNTR_NORMAL,
4915 access_tx_launch_csr_parity_err_cnt),
4916[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4917 CNTR_NORMAL,
4918 access_tx_illegal_vl_err_cnt),
4919[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4920 "TxSbrdCtlStateMachineParityErr", 0, 0,
4921 CNTR_NORMAL,
4922 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4923[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4924 CNTR_NORMAL,
4925 access_egress_reserved_10_err_cnt),
4926[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4927 CNTR_NORMAL,
4928 access_egress_reserved_9_err_cnt),
4929[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4930 0, 0, CNTR_NORMAL,
4931 access_tx_sdma_launch_intf_parity_err_cnt),
4932[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4933 CNTR_NORMAL,
4934 access_tx_pio_launch_intf_parity_err_cnt),
4935[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4936 CNTR_NORMAL,
4937 access_egress_reserved_6_err_cnt),
4938[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4939 CNTR_NORMAL,
4940 access_tx_incorrect_link_state_err_cnt),
4941[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4942 CNTR_NORMAL,
4943 access_tx_linkdown_err_cnt),
4944[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4945 "EgressFifoUnderrunOrParityErr", 0, 0,
4946 CNTR_NORMAL,
4947 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4948[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4949 CNTR_NORMAL,
4950 access_egress_reserved_2_err_cnt),
4951[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4952 CNTR_NORMAL,
4953 access_tx_pkt_integrity_mem_unc_err_cnt),
4954[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4955 CNTR_NORMAL,
4956 access_tx_pkt_integrity_mem_cor_err_cnt),
4957/* SendErrStatus */
4958[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4959 CNTR_NORMAL,
4960 access_send_csr_write_bad_addr_err_cnt),
4961[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4962 CNTR_NORMAL,
4963 access_send_csr_read_bad_addr_err_cnt),
4964[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4965 CNTR_NORMAL,
4966 access_send_csr_parity_cnt),
4967/* SendCtxtErrStatus */
4968[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4969 CNTR_NORMAL,
4970 access_pio_write_out_of_bounds_err_cnt),
4971[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4972 CNTR_NORMAL,
4973 access_pio_write_overflow_err_cnt),
4974[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4975 0, 0, CNTR_NORMAL,
4976 access_pio_write_crosses_boundary_err_cnt),
4977[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4978 CNTR_NORMAL,
4979 access_pio_disallowed_packet_err_cnt),
4980[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4981 CNTR_NORMAL,
4982 access_pio_inconsistent_sop_err_cnt),
4983/* SendDmaEngErrStatus */
4984[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4985 0, 0, CNTR_NORMAL,
4986 access_sdma_header_request_fifo_cor_err_cnt),
4987[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4988 CNTR_NORMAL,
4989 access_sdma_header_storage_cor_err_cnt),
4990[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4991 CNTR_NORMAL,
4992 access_sdma_packet_tracking_cor_err_cnt),
4993[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4994 CNTR_NORMAL,
4995 access_sdma_assembly_cor_err_cnt),
4996[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4997 CNTR_NORMAL,
4998 access_sdma_desc_table_cor_err_cnt),
4999[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5000 0, 0, CNTR_NORMAL,
5001 access_sdma_header_request_fifo_unc_err_cnt),
5002[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5003 CNTR_NORMAL,
5004 access_sdma_header_storage_unc_err_cnt),
5005[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5006 CNTR_NORMAL,
5007 access_sdma_packet_tracking_unc_err_cnt),
5008[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5009 CNTR_NORMAL,
5010 access_sdma_assembly_unc_err_cnt),
5011[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5012 CNTR_NORMAL,
5013 access_sdma_desc_table_unc_err_cnt),
5014[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5015 CNTR_NORMAL,
5016 access_sdma_timeout_err_cnt),
5017[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5018 CNTR_NORMAL,
5019 access_sdma_header_length_err_cnt),
5020[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5021 CNTR_NORMAL,
5022 access_sdma_header_address_err_cnt),
5023[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5024 CNTR_NORMAL,
5025 access_sdma_header_select_err_cnt),
5026[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5027 CNTR_NORMAL,
5028 access_sdma_reserved_9_err_cnt),
5029[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5030 CNTR_NORMAL,
5031 access_sdma_packet_desc_overflow_err_cnt),
5032[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5033 CNTR_NORMAL,
5034 access_sdma_length_mismatch_err_cnt),
5035[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5036 CNTR_NORMAL,
5037 access_sdma_halt_err_cnt),
5038[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5039 CNTR_NORMAL,
5040 access_sdma_mem_read_err_cnt),
5041[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5042 CNTR_NORMAL,
5043 access_sdma_first_desc_err_cnt),
5044[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5045 CNTR_NORMAL,
5046 access_sdma_tail_out_of_bounds_err_cnt),
5047[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5048 CNTR_NORMAL,
5049 access_sdma_too_long_err_cnt),
5050[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5051 CNTR_NORMAL,
5052 access_sdma_gen_mismatch_err_cnt),
5053[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5054 CNTR_NORMAL,
5055 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005056};
5057
5058static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5059[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5060 CNTR_NORMAL),
5061[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5062 CNTR_NORMAL),
5063[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5064 CNTR_NORMAL),
5065[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5066 CNTR_NORMAL),
5067[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5068 CNTR_NORMAL),
5069[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5070 CNTR_NORMAL),
5071[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5072 CNTR_NORMAL),
5073[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5074[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5075[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5076[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005077 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005078[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005079 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005080[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005081 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005082[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5083[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5084[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005085 access_sw_link_dn_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005086[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005087 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05005088[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5089 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005090[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005091 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005092[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08005093 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5094 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005095[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08005096 access_xmit_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005097[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08005098 access_rcv_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005099[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5100[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5101[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5102[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5103[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5104[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5105[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5106[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5107[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5108[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5109[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5110[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5111[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5112 access_sw_cpu_rc_acks),
5113[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005114 access_sw_cpu_rc_qacks),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005115[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005116 access_sw_cpu_rc_delayed_comp),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005117[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5118[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5119[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5120[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5121[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5122[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5123[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5124[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5125[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5126[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5127[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5128[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5129[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5130[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5131[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5132[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5133[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5134[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5135[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5136[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5137[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5138[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5139[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5140[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5141[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5142[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5143[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5144[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5145[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5146[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5147[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5148[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5149[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5150[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5151[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5152[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5153[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5154[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5155[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5156[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5157[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5158[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5159[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5160[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5161[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5162[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5163[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5164[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5165[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5166[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5167[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5168[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5169[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5170[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5171[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5172[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5173[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5174[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5175[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5176[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5177[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5178[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5179[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5180[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5181[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5182[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5183[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5184[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5185[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5186[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5187[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5188[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5189[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5190[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5191[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5192[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5193[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5194[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5195[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5196[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5197};
5198
5199/* ======================================================================== */
5200
Mike Marciniszyn77241052015-07-30 15:17:43 -04005201/* return true if this is chip revision a */
5202int is_ax(struct hfi1_devdata *dd)
5203{
5204 u8 chip_rev_minor =
5205 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5206 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5207 return (chip_rev_minor & 0xf0) == 0;
5208}
5209
 5210/* return true if this is chip revision b */
5211int is_bx(struct hfi1_devdata *dd)
5212{
5213 u8 chip_rev_minor =
5214 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5215 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005216 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005217}
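/*
 * Illustrative note (not part of the original source): both helpers decode
 * the CCE_REVISION minor field, whose upper nibble carries the stepping.
 * For example, a minor revision of 0x02 reads back as an A-step part
 * (is_ax() returns true), while 0x11 reads back as a B-step part
 * (is_bx() returns true).
 */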
5218
5219/*
5220 * Append string s to buffer buf. Arguments curp and len are the current
5221 * position and remaining length, respectively.
5222 *
5223 * return 0 on success, 1 on out of room
5224 */
5225static int append_str(char *buf, char **curp, int *lenp, const char *s)
5226{
5227 char *p = *curp;
5228 int len = *lenp;
5229 int result = 0; /* success */
5230 char c;
5231
 5232	/* add a comma, if not first in the buffer */
5233 if (p != buf) {
5234 if (len == 0) {
5235 result = 1; /* out of room */
5236 goto done;
5237 }
5238 *p++ = ',';
5239 len--;
5240 }
5241
5242 /* copy the string */
5243 while ((c = *s++) != 0) {
5244 if (len == 0) {
5245 result = 1; /* out of room */
5246 goto done;
5247 }
5248 *p++ = c;
5249 len--;
5250 }
5251
5252done:
5253 /* write return values */
5254 *curp = p;
5255 *lenp = len;
5256
5257 return result;
5258}
5259
5260/*
5261 * Using the given flag table, print a comma separated string into
5262 * the buffer. End in '*' if the buffer is too short.
5263 */
5264static char *flag_string(char *buf, int buf_len, u64 flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005265 struct flag_table *table, int table_size)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005266{
5267 char extra[32];
5268 char *p = buf;
5269 int len = buf_len;
5270 int no_room = 0;
5271 int i;
5272
 5273	/* make sure there are at least 2 bytes so we can form "*" */
5274 if (len < 2)
5275 return "";
5276
5277 len--; /* leave room for a nul */
5278 for (i = 0; i < table_size; i++) {
5279 if (flags & table[i].flag) {
5280 no_room = append_str(buf, &p, &len, table[i].str);
5281 if (no_room)
5282 break;
5283 flags &= ~table[i].flag;
5284 }
5285 }
5286
5287 /* any undocumented bits left? */
5288 if (!no_room && flags) {
5289 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5290 no_room = append_str(buf, &p, &len, extra);
5291 }
5292
 5293	/* add '*' if we ran out of room */
5294 if (no_room) {
5295 /* may need to back up to add space for a '*' */
5296 if (len == 0)
5297 --p;
5298 *p++ = '*';
5299 }
5300
5301 /* add final nul - space already allocated above */
5302 *p = 0;
5303 return buf;
5304}
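/*
 * Usage sketch (illustrative only; flag names and the table argument are
 * hypothetical): with a 96-byte buffer and two known bits set,
 * flag_string() yields something like "FlagAErr,FlagBErr".  Any remaining
 * undocumented bits are appended as "bits 0x<value>", and a trailing '*'
 * marks output truncated by a too-small buffer.
 *
 *	char buf[96];
 *
 *	dd_dev_info(dd, "Error: %s\n",
 *		    flag_string(buf, sizeof(buf), reg, table,
 *				ARRAY_SIZE(table)));
 */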
5305
5306/* first 8 CCE error interrupt source names */
5307static const char * const cce_misc_names[] = {
5308 "CceErrInt", /* 0 */
5309 "RxeErrInt", /* 1 */
5310 "MiscErrInt", /* 2 */
5311 "Reserved3", /* 3 */
5312 "PioErrInt", /* 4 */
5313 "SDmaErrInt", /* 5 */
5314 "EgressErrInt", /* 6 */
5315 "TxeErrInt" /* 7 */
5316};
5317
5318/*
5319 * Return the miscellaneous error interrupt name.
5320 */
5321static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5322{
5323 if (source < ARRAY_SIZE(cce_misc_names))
5324 strncpy(buf, cce_misc_names[source], bsize);
5325 else
Jubin John17fb4f22016-02-14 20:21:52 -08005326 snprintf(buf, bsize, "Reserved%u",
5327 source + IS_GENERAL_ERR_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005328
5329 return buf;
5330}
5331
5332/*
5333 * Return the SDMA engine error interrupt name.
5334 */
5335static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5336{
5337 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5338 return buf;
5339}
5340
5341/*
5342 * Return the send context error interrupt name.
5343 */
5344static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5345{
5346 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5347 return buf;
5348}
5349
5350static const char * const various_names[] = {
5351 "PbcInt",
5352 "GpioAssertInt",
5353 "Qsfp1Int",
5354 "Qsfp2Int",
5355 "TCritInt"
5356};
5357
5358/*
5359 * Return the various interrupt name.
5360 */
5361static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5362{
5363 if (source < ARRAY_SIZE(various_names))
5364 strncpy(buf, various_names[source], bsize);
5365 else
Jubin John8638b772016-02-14 20:19:24 -08005366 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005367 return buf;
5368}
5369
5370/*
5371 * Return the DC interrupt name.
5372 */
5373static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5374{
5375 static const char * const dc_int_names[] = {
5376 "common",
5377 "lcb",
5378 "8051",
5379 "lbm" /* local block merge */
5380 };
5381
5382 if (source < ARRAY_SIZE(dc_int_names))
5383 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5384 else
5385 snprintf(buf, bsize, "DCInt%u", source);
5386 return buf;
5387}
5388
5389static const char * const sdma_int_names[] = {
5390 "SDmaInt",
5391 "SdmaIdleInt",
5392 "SdmaProgressInt",
5393};
5394
5395/*
5396 * Return the SDMA engine interrupt name.
5397 */
5398static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5399{
5400 /* what interrupt */
5401 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5402 /* which engine */
5403 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5404
5405 if (likely(what < 3))
5406 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5407 else
5408 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5409 return buf;
5410}
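/*
 * Example (assuming TXE_NUM_SDMA_ENGINES is 16): source 17 decodes to
 * what = 1 and which = 1, producing "SdmaIdleInt1"; any source that
 * decodes to what >= 3 falls through to the "Invalid SDMA interrupt"
 * string instead.
 */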
5411
5412/*
5413 * Return the receive available interrupt name.
5414 */
5415static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5416{
5417 snprintf(buf, bsize, "RcvAvailInt%u", source);
5418 return buf;
5419}
5420
5421/*
5422 * Return the receive urgent interrupt name.
5423 */
5424static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5425{
5426 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5427 return buf;
5428}
5429
5430/*
5431 * Return the send credit interrupt name.
5432 */
5433static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5434{
5435 snprintf(buf, bsize, "SendCreditInt%u", source);
5436 return buf;
5437}
5438
5439/*
5440 * Return the reserved interrupt name.
5441 */
5442static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5443{
5444 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5445 return buf;
5446}
5447
5448static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5449{
5450 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005451 cce_err_status_flags,
5452 ARRAY_SIZE(cce_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005453}
5454
5455static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5456{
5457 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005458 rxe_err_status_flags,
5459 ARRAY_SIZE(rxe_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005460}
5461
5462static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5463{
5464 return flag_string(buf, buf_len, flags, misc_err_status_flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005465 ARRAY_SIZE(misc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005466}
5467
5468static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5469{
5470 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005471 pio_err_status_flags,
5472 ARRAY_SIZE(pio_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005473}
5474
5475static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5476{
5477 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005478 sdma_err_status_flags,
5479 ARRAY_SIZE(sdma_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005480}
5481
5482static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5483{
5484 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005485 egress_err_status_flags,
5486 ARRAY_SIZE(egress_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005487}
5488
5489static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5490{
5491 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005492 egress_err_info_flags,
5493 ARRAY_SIZE(egress_err_info_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005494}
5495
5496static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5497{
5498 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005499 send_err_status_flags,
5500 ARRAY_SIZE(send_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005501}
5502
5503static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5504{
5505 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005506 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005507
5508 /*
 5509	 * For most of these errors, there is nothing that can be done except
5510 * report or record it.
5511 */
5512 dd_dev_info(dd, "CCE Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005513 cce_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005514
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005515 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5516 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005517 /* this error requires a manual drop into SPC freeze mode */
5518 /* then a fix up */
5519 start_freeze_handling(dd->pport, FREEZE_SELF);
5520 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005521
5522 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5523 if (reg & (1ull << i)) {
5524 incr_cntr64(&dd->cce_err_status_cnt[i]);
5525 /* maintain a counter over all cce_err_status errors */
5526 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5527 }
5528 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005529}
5530
5531/*
5532 * Check counters for receive errors that do not have an interrupt
5533 * associated with them.
5534 */
5535#define RCVERR_CHECK_TIME 10
5536static void update_rcverr_timer(unsigned long opaque)
5537{
5538 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5539 struct hfi1_pportdata *ppd = dd->pport;
5540 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5541
5542 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
Jubin John17fb4f22016-02-14 20:21:52 -08005543 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005544 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
Jubin John17fb4f22016-02-14 20:21:52 -08005545 set_link_down_reason(
5546 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5547 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
Sebastian Sanchez71d47002017-07-29 08:43:49 -07005548 queue_work(ppd->link_wq, &ppd->link_bounce_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005549 }
Jubin John50e5dcb2016-02-14 20:19:41 -08005550 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005551
5552 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5553}
5554
5555static int init_rcverr(struct hfi1_devdata *dd)
5556{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305557 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005558 /* Assume the hardware counter has been reset */
5559 dd->rcv_ovfl_cnt = 0;
5560 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5561}
5562
5563static void free_rcverr(struct hfi1_devdata *dd)
5564{
5565 if (dd->rcverr_timer.data)
5566 del_timer_sync(&dd->rcverr_timer);
5567 dd->rcverr_timer.data = 0;
5568}
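/*
 * Note (added for clarity): the timer's .data field doubles as an
 * "initialized" flag here, so free_rcverr() only cancels the timer when
 * init_rcverr() actually armed it.
 */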
5569
5570static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5571{
5572 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005573 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005574
5575 dd_dev_info(dd, "Receive Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005576 rxe_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005577
5578 if (reg & ALL_RXE_FREEZE_ERR) {
5579 int flags = 0;
5580
5581 /*
5582 * Freeze mode recovery is disabled for the errors
5583 * in RXE_FREEZE_ABORT_MASK
5584 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005585 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005586 flags = FREEZE_ABORT;
5587
5588 start_freeze_handling(dd->pport, flags);
5589 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005590
5591 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5592 if (reg & (1ull << i))
5593 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5594 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005595}
5596
5597static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5598{
5599 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005600 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005601
5602 dd_dev_info(dd, "Misc Error: %s",
Jubin John17fb4f22016-02-14 20:21:52 -08005603 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005604 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5605 if (reg & (1ull << i))
5606 incr_cntr64(&dd->misc_err_status_cnt[i]);
5607 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005608}
5609
5610static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5611{
5612 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005613 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005614
5615 dd_dev_info(dd, "PIO Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005616 pio_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005617
5618 if (reg & ALL_PIO_FREEZE_ERR)
5619 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005620
5621 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5622 if (reg & (1ull << i))
5623 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5624 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005625}
5626
5627static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5628{
5629 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005630 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005631
5632 dd_dev_info(dd, "SDMA Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005633 sdma_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005634
5635 if (reg & ALL_SDMA_FREEZE_ERR)
5636 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005637
5638 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5639 if (reg & (1ull << i))
5640 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5641 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005642}
5643
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005644static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5645{
5646 incr_cntr64(&ppd->port_xmit_discards);
5647}
5648
Mike Marciniszyn77241052015-07-30 15:17:43 -04005649static void count_port_inactive(struct hfi1_devdata *dd)
5650{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005651 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005652}
5653
5654/*
5655 * We have had a "disallowed packet" error during egress. Determine the
5656 * integrity check which failed, and update relevant error counter, etc.
5657 *
5658 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5659 * bit of state per integrity check, and so we can miss the reason for an
5660 * egress error if more than one packet fails the same integrity check
5661 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5662 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005663static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5664 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005665{
5666 struct hfi1_pportdata *ppd = dd->pport;
5667 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5668 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5669 char buf[96];
5670
5671 /* clear down all observed info as quickly as possible after read */
5672 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5673
5674 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005675 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5676 info, egress_err_info_string(buf, sizeof(buf), info), src);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005677
5678 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005679 if (info & PORT_DISCARD_EGRESS_ERRS) {
5680 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005681
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005682 /*
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005683 * Count all applicable bits as individual errors and
5684 * attribute them to the packet that triggered this handler.
5685 * This may not be completely accurate due to limitations
5686 * on the available hardware error information. There is
5687 * a single information register and any number of error
5688 * packets may have occurred and contributed to it before
5689 * this routine is called. This means that:
5690 * a) If multiple packets with the same error occur before
5691 * this routine is called, earlier packets are missed.
5692 * There is only a single bit for each error type.
5693 * b) Errors may not be attributed to the correct VL.
5694 * The driver is attributing all bits in the info register
5695 * to the packet that triggered this call, but bits
5696 * could be an accumulation of different packets with
5697 * different VLs.
5698 * c) A single error packet may have multiple counts attached
5699 * to it. There is no way for the driver to know if
5700 * multiple bits set in the info register are due to a
5701 * single packet or multiple packets. The driver assumes
5702 * multiple packets.
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005703 */
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005704 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005705 for (i = 0; i < weight; i++) {
5706 __count_port_discards(ppd);
5707 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5708 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5709 else if (vl == 15)
5710 incr_cntr64(&ppd->port_xmit_discards_vl
5711 [C_VL_15]);
5712 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005713 }
5714}
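/*
 * Illustrative example of the counting policy above: if the info register
 * has three disallowed-packet bits set when this handler runs, the port
 * discard counter is incremented three times and, when a valid VL was
 * derived, the per-VL discard counter is incremented three times as well,
 * even though those bits may have come from fewer (or more) actual
 * packets.
 */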
5715
5716/*
5717 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5718 * register. Does it represent a 'port inactive' error?
5719 */
5720static inline int port_inactive_err(u64 posn)
5721{
5722 return (posn >= SEES(TX_LINKDOWN) &&
5723 posn <= SEES(TX_INCORRECT_LINK_STATE));
5724}
5725
5726/*
5727 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5728 * register. Does it represent a 'disallowed packet' error?
5729 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005730static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005731{
5732 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5733 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5734}
5735
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005736/*
5737 * Input value is a bit position of one of the SDMA engine disallowed
5738 * packet errors. Return which engine. Use of this must be guarded by
5739 * disallowed_pkt_err().
5740 */
5741static inline int disallowed_pkt_engine(int posn)
5742{
5743 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5744}
5745
5746/*
5747 * Translate an SDMA engine to a VL. Return -1 if the tranlation cannot
5748 * be done.
5749 */
5750static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5751{
5752 struct sdma_vl_map *m;
5753 int vl;
5754
5755 /* range check */
5756 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5757 return -1;
5758
5759 rcu_read_lock();
5760 m = rcu_dereference(dd->sdma_map);
5761 vl = m->engine_to_vl[engine];
5762 rcu_read_unlock();
5763
5764 return vl;
5765}
5766
5767/*
 5768 * Translate the send context (software index) into a VL. Return -1 if the
5769 * translation cannot be done.
5770 */
5771static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5772{
5773 struct send_context_info *sci;
5774 struct send_context *sc;
5775 int i;
5776
5777 sci = &dd->send_contexts[sw_index];
5778
5779 /* there is no information for user (PSM) and ack contexts */
Jianxin Xiong44306f12016-04-12 11:30:28 -07005780 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005781 return -1;
5782
5783 sc = sci->sc;
5784 if (!sc)
5785 return -1;
5786 if (dd->vld[15].sc == sc)
5787 return 15;
5788 for (i = 0; i < num_vls; i++)
5789 if (dd->vld[i].sc == sc)
5790 return i;
5791
5792 return -1;
5793}
5794
Mike Marciniszyn77241052015-07-30 15:17:43 -04005795static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5796{
5797 u64 reg_copy = reg, handled = 0;
5798 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005799 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005800
5801 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5802 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005803 else if (is_ax(dd) &&
5804 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5805 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005806 start_freeze_handling(dd->pport, 0);
5807
5808 while (reg_copy) {
5809 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005810 /* fls64() returns a 1-based offset, we want it zero based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005811 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005812 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005813
5814 if (port_inactive_err(shift)) {
5815 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005816 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005817 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005818 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5819
5820 handle_send_egress_err_info(dd, vl);
5821 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005822 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005823 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005824 }
5825
5826 reg &= ~handled;
5827
5828 if (reg)
5829 dd_dev_info(dd, "Egress Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005830 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005831
5832 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5833 if (reg & (1ull << i))
5834 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5835 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005836}
5837
5838static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5839{
5840 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005841 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005842
5843 dd_dev_info(dd, "Send Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005844 send_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005845
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005846 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5847 if (reg & (1ull << i))
5848 incr_cntr64(&dd->send_err_status_cnt[i]);
5849 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005850}
5851
5852/*
5853 * The maximum number of times the error clear down will loop before
5854 * blocking a repeating error. This value is arbitrary.
5855 */
5856#define MAX_CLEAR_COUNT 20
5857
5858/*
5859 * Clear and handle an error register. All error interrupts are funneled
5860 * through here to have a central location to correctly handle single-
5861 * or multi-shot errors.
5862 *
5863 * For non per-context registers, call this routine with a context value
5864 * of 0 so the per-context offset is zero.
5865 *
5866 * If the handler loops too many times, assume that something is wrong
5867 * and can't be fixed, so mask the error bits.
5868 */
5869static void interrupt_clear_down(struct hfi1_devdata *dd,
5870 u32 context,
5871 const struct err_reg_info *eri)
5872{
5873 u64 reg;
5874 u32 count;
5875
5876 /* read in a loop until no more errors are seen */
5877 count = 0;
5878 while (1) {
5879 reg = read_kctxt_csr(dd, context, eri->status);
5880 if (reg == 0)
5881 break;
5882 write_kctxt_csr(dd, context, eri->clear, reg);
5883 if (likely(eri->handler))
5884 eri->handler(dd, context, reg);
5885 count++;
5886 if (count > MAX_CLEAR_COUNT) {
5887 u64 mask;
5888
5889 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005890 eri->desc, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005891 /*
5892 * Read-modify-write so any other masked bits
5893 * remain masked.
5894 */
5895 mask = read_kctxt_csr(dd, context, eri->mask);
5896 mask &= ~reg;
5897 write_kctxt_csr(dd, context, eri->mask, mask);
5898 break;
5899 }
5900 }
5901}
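/*
 * Illustrative scenario: an error bit that reasserts immediately after
 * every clear will trip the MAX_CLEAR_COUNT (20) limit above; the bit is
 * then removed from the mask register via read-modify-write, so it stops
 * generating interrupts while other already-masked bits are preserved.
 */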
5902
5903/*
5904 * CCE block "misc" interrupt. Source is < 16.
5905 */
5906static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5907{
5908 const struct err_reg_info *eri = &misc_errs[source];
5909
5910 if (eri->handler) {
5911 interrupt_clear_down(dd, 0, eri);
5912 } else {
5913 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005914 source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005915 }
5916}
5917
5918static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5919{
5920 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005921 sc_err_status_flags,
5922 ARRAY_SIZE(sc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005923}
5924
5925/*
5926 * Send context error interrupt. Source (hw_context) is < 160.
5927 *
5928 * All send context errors cause the send context to halt. The normal
5929 * clear-down mechanism cannot be used because we cannot clear the
5930 * error bits until several other long-running items are done first.
5931 * This is OK because with the context halted, nothing else is going
5932 * to happen on it anyway.
5933 */
5934static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5935 unsigned int hw_context)
5936{
5937 struct send_context_info *sci;
5938 struct send_context *sc;
5939 char flags[96];
5940 u64 status;
5941 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005942 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005943
5944 sw_index = dd->hw_to_sw[hw_context];
5945 if (sw_index >= dd->num_send_contexts) {
5946 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005947 "out of range sw index %u for send context %u\n",
5948 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005949 return;
5950 }
5951 sci = &dd->send_contexts[sw_index];
5952 sc = sci->sc;
5953 if (!sc) {
5954 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -08005955 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005956 return;
5957 }
5958
5959 /* tell the software that a halt has begun */
5960 sc_stop(sc, SCF_HALTED);
5961
5962 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5963
5964 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
Jubin John17fb4f22016-02-14 20:21:52 -08005965 send_context_err_status_string(flags, sizeof(flags),
5966 status));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005967
5968 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005969 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005970
5971 /*
5972 * Automatically restart halted kernel contexts out of interrupt
5973 * context. User contexts must ask the driver to restart the context.
5974 */
5975 if (sc->type != SC_USER)
5976 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005977
5978 /*
5979 * Update the counters for the corresponding status bits.
5980 * Note that these particular counters are aggregated over all
5981 * 160 contexts.
5982 */
5983 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5984 if (status & (1ull << i))
5985 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5986 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005987}
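
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * per-bit counter update done at the end of is_sendctxt_err_int(), reduced
 * to its essentials.  Every set bit in the status word bumps the matching
 * slot of a counter array; the parameters here are hypothetical stand-ins
 * for dd->sw_ctxt_err_status_cnt[] and its size.
 */
static inline void example_count_status_bits(u64 status, u64 *counters,
					     u32 num_counters)
{
	u32 i;

	for (i = 0; i < num_counters; i++)
		if (status & (1ull << i))
			counters[i]++;
}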
5988
5989static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5990 unsigned int source, u64 status)
5991{
5992 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005993 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005994
5995 sde = &dd->per_sdma[source];
5996#ifdef CONFIG_SDMA_VERBOSITY
5997 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5998 slashstrip(__FILE__), __LINE__, __func__);
5999 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6000 sde->this_idx, source, (unsigned long long)status);
6001#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05006002 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006003 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05006004
6005 /*
6006 * Update the counters for the corresponding status bits.
6007 * Note that these particular counters are aggregated over
6008 * all 16 DMA engines.
6009 */
6010 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6011 if (status & (1ull << i))
6012 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6013 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006014}
6015
6016/*
6017 * CCE block SDMA error interrupt. Source is < 16.
6018 */
6019static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6020{
6021#ifdef CONFIG_SDMA_VERBOSITY
6022 struct sdma_engine *sde = &dd->per_sdma[source];
6023
6024 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6025 slashstrip(__FILE__), __LINE__, __func__);
6026 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6027 source);
6028 sdma_dumpstate(sde);
6029#endif
6030 interrupt_clear_down(dd, source, &sdma_eng_err);
6031}
6032
6033/*
6034 * CCE block "various" interrupt. Source is < 8.
6035 */
6036static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6037{
6038 const struct err_reg_info *eri = &various_err[source];
6039
6040 /*
6041 * TCritInt cannot go through interrupt_clear_down()
6042 * because it is not a second tier interrupt. The handler
6043 * should be called directly.
6044 */
6045 if (source == TCRIT_INT_SOURCE)
6046 handle_temp_err(dd);
6047 else if (eri->handler)
6048 interrupt_clear_down(dd, 0, eri);
6049 else
6050 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006051 "%s: Unimplemented/reserved interrupt %d\n",
6052 __func__, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006053}
6054
6055static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6056{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006057 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006058 struct hfi1_pportdata *ppd = dd->pport;
6059 unsigned long flags;
6060 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6061
6062 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006063 if (!qsfp_mod_present(ppd)) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006064 dd_dev_info(dd, "%s: QSFP module removed\n",
6065 __func__);
6066
Mike Marciniszyn77241052015-07-30 15:17:43 -04006067 ppd->driver_link_ready = 0;
6068 /*
6069 * Cable removed, reset all our information about the
6070 * cache and cable capabilities
6071 */
6072
6073 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6074 /*
6075 * We don't set cache_refresh_required here as we expect
6076 * an interrupt when a cable is inserted
6077 */
6078 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006079 ppd->qsfp_info.reset_needed = 0;
6080 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006081 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006082 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006083 /* Invert the ModPresent pin now to detect plug-in */
6084 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6085 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006086
6087 if ((ppd->offline_disabled_reason >
6088 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006089 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08006090 (ppd->offline_disabled_reason ==
6091 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6092 ppd->offline_disabled_reason =
6093 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006094 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006095
Mike Marciniszyn77241052015-07-30 15:17:43 -04006096 if (ppd->host_link_state == HLS_DN_POLL) {
6097 /*
6098 * The link is still in POLL. This means
6099 * that the normal link down processing
6100 * will not happen. We have to do it here
6101 * before turning the DC off.
6102 */
Sebastian Sanchez71d47002017-07-29 08:43:49 -07006103 queue_work(ppd->link_wq, &ppd->link_down_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006104 }
6105 } else {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006106 dd_dev_info(dd, "%s: QSFP module inserted\n",
6107 __func__);
6108
Mike Marciniszyn77241052015-07-30 15:17:43 -04006109 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6110 ppd->qsfp_info.cache_valid = 0;
6111 ppd->qsfp_info.cache_refresh_required = 1;
6112 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006113 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006114
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006115 /*
6116 * Stop inversion of ModPresent pin to detect
6117 * removal of the cable
6118 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006119 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006120 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6121 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6122
6123 ppd->offline_disabled_reason =
6124 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006125 }
6126 }
6127
6128 if (reg & QSFP_HFI0_INT_N) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006129 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006130 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006131 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6132 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006133 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6134 }
6135
6136 /* Schedule the QSFP work only if there is a cable attached. */
6137 if (qsfp_mod_present(ppd))
Sebastian Sanchez71d47002017-07-29 08:43:49 -07006138 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006139}
6140
6141static int request_host_lcb_access(struct hfi1_devdata *dd)
6142{
6143 int ret;
6144
6145 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006146 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6147 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006148 if (ret != HCMD_SUCCESS) {
6149 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006150 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006151 }
6152 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6153}
6154
6155static int request_8051_lcb_access(struct hfi1_devdata *dd)
6156{
6157 int ret;
6158
6159 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006160 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6161 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006162 if (ret != HCMD_SUCCESS) {
6163 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006164 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006165 }
6166 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6167}
6168
6169/*
6170 * Set the LCB selector - allow host access. The DCC selector always
6171 * points to the host.
6172 */
6173static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6174{
6175 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006176 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6177 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006178}
6179
6180/*
6181 * Clear the LCB selector - allow 8051 access. The DCC selector always
6182 * points to the host.
6183 */
6184static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6185{
6186 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006187 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006188}
6189
6190/*
6191 * Acquire LCB access from the 8051. If the host already has access,
6192 * just increment a counter. Otherwise, inform the 8051 that the
6193 * host is taking access.
6194 *
6195 * Returns:
6196 * 0 on success
6197 * -EBUSY if the 8051 has control and cannot be disturbed
6198 * -errno if unable to acquire access from the 8051
6199 */
6200int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6201{
6202 struct hfi1_pportdata *ppd = dd->pport;
6203 int ret = 0;
6204
6205 /*
6206 * Use the host link state lock so the operation of this routine
6207 * { link state check, selector change, count increment } can occur
6208 * as a unit against a link state change. Otherwise there is a
6209 * race between the state change and the count increment.
6210 */
6211 if (sleep_ok) {
6212 mutex_lock(&ppd->hls_lock);
6213 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006214 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006215 udelay(1);
6216 }
6217
6218 /* this access is valid only when the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07006219 if (ppd->host_link_state & HLS_DOWN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006220 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006221 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006222 ret = -EBUSY;
6223 goto done;
6224 }
6225
6226 if (dd->lcb_access_count == 0) {
6227 ret = request_host_lcb_access(dd);
6228 if (ret) {
6229 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006230 "%s: unable to acquire LCB access, err %d\n",
6231 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006232 goto done;
6233 }
6234 set_host_lcb_access(dd);
6235 }
6236 dd->lcb_access_count++;
6237done:
6238 mutex_unlock(&ppd->hls_lock);
6239 return ret;
6240}
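
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * reference-counted handoff used by acquire_lcb_access(), with the
 * hardware and link-state checks stripped out.  Only the 0 -> 1 count
 * transition asks the other side (here a hypothetical request_access()
 * callback) for ownership; later callers just bump the count.  The release
 * side mirrors this: only the 1 -> 0 transition hands ownership back.
 */
static inline int example_refcounted_acquire(struct mutex *lock, int *count,
					     int (*request_access)(void))
{
	int ret = 0;

	mutex_lock(lock);
	if (*count == 0) {
		ret = request_access();
		if (ret)
			goto done;
	}
	(*count)++;
done:
	mutex_unlock(lock);
	return ret;
}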
6241
6242/*
6243 * Release LCB access by decrementing the use count. If the count is moving
6244 * from 1 to 0, inform 8051 that it has control back.
6245 *
6246 * Returns:
6247 * 0 on success
6248 * -errno if unable to release access to the 8051
6249 */
6250int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6251{
6252 int ret = 0;
6253
6254 /*
6255 * Use the host link state lock because the acquire needed it.
6256 * Here, we only need to keep { selector change, count decrement }
6257 * as a unit.
6258 */
6259 if (sleep_ok) {
6260 mutex_lock(&dd->pport->hls_lock);
6261 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006262 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006263 udelay(1);
6264 }
6265
6266 if (dd->lcb_access_count == 0) {
6267 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006268 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006269 goto done;
6270 }
6271
6272 if (dd->lcb_access_count == 1) {
6273 set_8051_lcb_access(dd);
6274 ret = request_8051_lcb_access(dd);
6275 if (ret) {
6276 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006277 "%s: unable to release LCB access, err %d\n",
6278 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006279 /* restore host access if the grant didn't work */
6280 set_host_lcb_access(dd);
6281 goto done;
6282 }
6283 }
6284 dd->lcb_access_count--;
6285done:
6286 mutex_unlock(&dd->pport->hls_lock);
6287 return ret;
6288}
6289
6290/*
6291 * Initialize LCB access variables and state. Called during driver load,
6292 * after most of the initialization is finished.
6293 *
6294 * The DC default is LCB access on for the host. The driver defaults to
6295 * leaving access to the 8051. Assign access now - this constrains the call
6296 * to this routine to be after all LCB set-up is done. In particular, after
 6297 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6298 */
6299static void init_lcb_access(struct hfi1_devdata *dd)
6300{
6301 dd->lcb_access_count = 0;
6302}
6303
6304/*
 6305 * Write a response back to an 8051 request.
6306 */
6307static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6308{
6309 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006310 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6311 (u64)return_code <<
6312 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6313 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006314}
6315
6316/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006317 * Handle host requests from the 8051.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006318 */
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006319static void handle_8051_request(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006320{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006321 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006322 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006323 u16 data = 0;
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006324 u8 type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006325
6326 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6327 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6328 return; /* no request */
6329
6330 /* zero out COMPLETED so the response is seen */
6331 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6332
6333 /* extract request details */
6334 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6335 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6336 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6337 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6338
6339 switch (type) {
6340 case HREQ_LOAD_CONFIG:
6341 case HREQ_SAVE_CONFIG:
6342 case HREQ_READ_CONFIG:
6343 case HREQ_SET_TX_EQ_ABS:
6344 case HREQ_SET_TX_EQ_REL:
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006345 case HREQ_ENABLE:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006346 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006347 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006348 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6349 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006350 case HREQ_CONFIG_DONE:
6351 hreq_response(dd, HREQ_SUCCESS, 0);
6352 break;
6353
6354 case HREQ_INTERFACE_TEST:
6355 hreq_response(dd, HREQ_SUCCESS, data);
6356 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006357 default:
6358 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6359 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6360 break;
6361 }
6362}
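
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * shift-and-mask field extraction used by handle_8051_request() to pull
 * the request type and data out of one CSR read.  The field layout below
 * is made up for the example; the real shifts and masks come from the
 * DC_DC8051_CFG_EXT_DEV_1 definitions.
 */
static inline void example_decode_request(u64 reg, u8 *type, u16 *data)
{
	/* hypothetical layout: type in bits 23:16, data in bits 47:32 */
	*type = (reg >> 16) & 0xff;
	*data = (reg >> 32) & 0xffff;
}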
6363
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006364/*
 6365 * Set up allocation unit value.
6366 */
6367void set_up_vau(struct hfi1_devdata *dd, u8 vau)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006368{
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006369 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6370
6371 /* do not modify other values in the register */
6372 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6373 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6374 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006375}
6376
6377/*
6378 * Set up initial VL15 credits of the remote. Assumes the rest of
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006379 * the CM credit registers are zero from a previous global or credit reset.
6380 * Shared limit for VL15 will always be 0.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006381 */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006382void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006383{
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006384 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6385
6386 /* set initial values for total and shared credit limit */
6387 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6388 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6389
6390 /*
6391 * Set total limit to be equal to VL15 credits.
6392 * Leave shared limit at 0.
6393 */
6394 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6395 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006396
Dennis Dalessandroeacc8302016-10-17 04:19:52 -07006397 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6398 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006399}
6400
6401/*
6402 * Zero all credit details from the previous connection and
6403 * reset the CM manager's internal counters.
6404 */
6405void reset_link_credits(struct hfi1_devdata *dd)
6406{
6407 int i;
6408
6409 /* remove all previous VL credit limits */
6410 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006411 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006412 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006413 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006414 /* reset the CM block */
6415 pio_send_control(dd, PSC_CM_RESET);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006416 /* reset cached value */
6417 dd->vl15buf_cached = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006418}
6419
6420/* convert a vCU to a CU */
6421static u32 vcu_to_cu(u8 vcu)
6422{
6423 return 1 << vcu;
6424}
6425
6426/* convert a CU to a vCU */
6427static u8 cu_to_vcu(u32 cu)
6428{
6429 return ilog2(cu);
6430}
6431
6432/* convert a vAU to an AU */
6433static u32 vau_to_au(u8 vau)
6434{
6435 return 8 * (1 << vau);
6436}
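
/*
 * Illustrative sketch (editorial addition, not part of the driver): worked
 * values for the three conversions above.  A vCU of 3 is 1 << 3 = 8 credit
 * units and ilog2(8) = 3 recovers the encoding; a vAU of 2 is an
 * 8 * (1 << 2) = 32-byte allocation unit.  The helper name is hypothetical
 * and only restates that arithmetic.
 */
static inline void example_unit_sizes(u32 *cu, u32 *au)
{
	*cu = 1 << 3;		/* vcu_to_cu(3): 8 credit units */
	*au = 8 * (1 << 2);	/* vau_to_au(2): 32-byte AU */
}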
6437
6438static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6439{
6440 ppd->sm_trap_qp = 0x0;
6441 ppd->sa_qp = 0x1;
6442}
6443
6444/*
6445 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6446 */
6447static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6448{
6449 u64 reg;
6450
6451 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6452 write_csr(dd, DC_LCB_CFG_RUN, 0);
6453 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6454 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006455 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006456 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6457 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6458 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006459 write_csr(dd, DCC_CFG_RESET, reg |
6460 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6461 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006462 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006463 if (!abort) {
6464 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6465 write_csr(dd, DCC_CFG_RESET, reg);
6466 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6467 }
6468}
6469
6470/*
6471 * This routine should be called after the link has been transitioned to
6472 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6473 * reset).
6474 *
6475 * The expectation is that the caller of this routine would have taken
6476 * care of properly transitioning the link into the correct state.
Tadeusz Struk22546b72017-04-28 10:40:02 -07006477 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6478 * before calling this function.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006479 */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006480static void _dc_shutdown(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006481{
Tadeusz Struk22546b72017-04-28 10:40:02 -07006482 lockdep_assert_held(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006483
Tadeusz Struk22546b72017-04-28 10:40:02 -07006484 if (dd->dc_shutdown)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006485 return;
Tadeusz Struk22546b72017-04-28 10:40:02 -07006486
Mike Marciniszyn77241052015-07-30 15:17:43 -04006487 dd->dc_shutdown = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006488 /* Shutdown the LCB */
6489 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006490 /*
 6491 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006492 * SerDes into reset already. Just need to shut down the 8051,
Jubin John4d114fd2016-02-14 20:21:43 -08006493 * itself.
6494 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006495 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6496}
6497
Tadeusz Struk22546b72017-04-28 10:40:02 -07006498static void dc_shutdown(struct hfi1_devdata *dd)
6499{
6500 mutex_lock(&dd->dc8051_lock);
6501 _dc_shutdown(dd);
6502 mutex_unlock(&dd->dc8051_lock);
6503}
6504
Jubin John4d114fd2016-02-14 20:21:43 -08006505/*
6506 * Calling this after the DC has been brought out of reset should not
6507 * do any damage.
Tadeusz Struk22546b72017-04-28 10:40:02 -07006508 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6509 * before calling this function.
Jubin John4d114fd2016-02-14 20:21:43 -08006510 */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006511static void _dc_start(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006512{
Tadeusz Struk22546b72017-04-28 10:40:02 -07006513 lockdep_assert_held(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006514
Mike Marciniszyn77241052015-07-30 15:17:43 -04006515 if (!dd->dc_shutdown)
Tadeusz Struk22546b72017-04-28 10:40:02 -07006516 return;
6517
Mike Marciniszyn77241052015-07-30 15:17:43 -04006518 /* Take the 8051 out of reset */
6519 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6520 /* Wait until 8051 is ready */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006521 if (wait_fm_ready(dd, TIMEOUT_8051_START))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006522 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006523 __func__);
Tadeusz Struk22546b72017-04-28 10:40:02 -07006524
Mike Marciniszyn77241052015-07-30 15:17:43 -04006525 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6526 write_csr(dd, DCC_CFG_RESET, 0x10);
6527 /* lcb_shutdown() with abort=1 does not restore these */
6528 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006529 dd->dc_shutdown = 0;
Tadeusz Struk22546b72017-04-28 10:40:02 -07006530}
6531
6532static void dc_start(struct hfi1_devdata *dd)
6533{
6534 mutex_lock(&dd->dc8051_lock);
6535 _dc_start(dd);
6536 mutex_unlock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006537}
6538
6539/*
6540 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6541 */
6542static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6543{
6544 u64 rx_radr, tx_radr;
6545 u32 version;
6546
6547 if (dd->icode != ICODE_FPGA_EMULATION)
6548 return;
6549
6550 /*
6551 * These LCB defaults on emulator _s are good, nothing to do here:
6552 * LCB_CFG_TX_FIFOS_RADR
6553 * LCB_CFG_RX_FIFOS_RADR
6554 * LCB_CFG_LN_DCLK
6555 * LCB_CFG_IGNORE_LOST_RCLK
6556 */
6557 if (is_emulator_s(dd))
6558 return;
6559 /* else this is _p */
6560
6561 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006562 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006563 version = 0x2d; /* all B0 use 0x2d or higher settings */
6564
6565 if (version <= 0x12) {
6566 /* release 0x12 and below */
6567
6568 /*
6569 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6570 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6571 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6572 */
6573 rx_radr =
6574 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6575 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6576 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6577 /*
6578 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6579 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6580 */
6581 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6582 } else if (version <= 0x18) {
6583 /* release 0x13 up to 0x18 */
6584 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6585 rx_radr =
6586 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6587 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6588 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6589 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6590 } else if (version == 0x19) {
6591 /* release 0x19 */
6592 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6593 rx_radr =
6594 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6595 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6596 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6597 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6598 } else if (version == 0x1a) {
6599 /* release 0x1a */
6600 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6601 rx_radr =
6602 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6603 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6604 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6605 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6606 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6607 } else {
6608 /* release 0x1b and higher */
6609 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6610 rx_radr =
6611 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6612 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6613 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6614 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6615 }
6616
6617 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6618 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6619 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006620 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006621 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6622}
6623
6624/*
6625 * Handle a SMA idle message
6626 *
6627 * This is a work-queue function outside of the interrupt.
6628 */
6629void handle_sma_message(struct work_struct *work)
6630{
6631 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6632 sma_message_work);
6633 struct hfi1_devdata *dd = ppd->dd;
6634 u64 msg;
6635 int ret;
6636
Jubin John4d114fd2016-02-14 20:21:43 -08006637 /*
6638 * msg is bytes 1-4 of the 40-bit idle message - the command code
6639 * is stripped off
6640 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006641 ret = read_idle_sma(dd, &msg);
6642 if (ret)
6643 return;
6644 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6645 /*
6646 * React to the SMA message. Byte[1] (0 for us) is the command.
6647 */
6648 switch (msg & 0xff) {
6649 case SMA_IDLE_ARM:
6650 /*
6651 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6652 * State Transitions
6653 *
6654 * Only expected in INIT or ARMED, discard otherwise.
6655 */
6656 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6657 ppd->neighbor_normal = 1;
6658 break;
6659 case SMA_IDLE_ACTIVE:
6660 /*
6661 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6662 * State Transitions
6663 *
6664 * Can activate the node. Discard otherwise.
6665 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006666 if (ppd->host_link_state == HLS_UP_ARMED &&
6667 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006668 ppd->neighbor_normal = 1;
6669 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6670 if (ret)
6671 dd_dev_err(
6672 dd,
6673 "%s: received Active SMA idle message, couldn't set link to Active\n",
6674 __func__);
6675 }
6676 break;
6677 default:
6678 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006679 "%s: received unexpected SMA idle message 0x%llx\n",
6680 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006681 break;
6682 }
6683}
6684
6685static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6686{
6687 u64 rcvctrl;
6688 unsigned long flags;
6689
6690 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6691 rcvctrl = read_csr(dd, RCV_CTRL);
6692 rcvctrl |= add;
6693 rcvctrl &= ~clear;
6694 write_csr(dd, RCV_CTRL, rcvctrl);
6695 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6696}
6697
6698static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6699{
6700 adjust_rcvctrl(dd, add, 0);
6701}
6702
6703static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6704{
6705 adjust_rcvctrl(dd, 0, clear);
6706}
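
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * add/clear read-modify-write idiom behind adjust_rcvctrl(), applied to a
 * software shadow value under a caller-supplied spinlock.  Taking the lock
 * around the whole read-modify-write keeps concurrent add and clear
 * requests from losing each other's bits.
 */
static inline void example_adjust_ctrl(spinlock_t *lock, u64 *shadow,
				       u64 add, u64 clear)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	*shadow = (*shadow | add) & ~clear;
	spin_unlock_irqrestore(lock, flags);
}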
6707
6708/*
6709 * Called from all interrupt handlers to start handling an SPC freeze.
6710 */
6711void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6712{
6713 struct hfi1_devdata *dd = ppd->dd;
6714 struct send_context *sc;
6715 int i;
6716
6717 if (flags & FREEZE_SELF)
6718 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6719
6720 /* enter frozen mode */
6721 dd->flags |= HFI1_FROZEN;
6722
6723 /* notify all SDMA engines that they are going into a freeze */
6724 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6725
6726 /* do halt pre-handling on all enabled send contexts */
6727 for (i = 0; i < dd->num_send_contexts; i++) {
6728 sc = dd->send_contexts[i].sc;
6729 if (sc && (sc->flags & SCF_ENABLED))
6730 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6731 }
6732
 6733 /* Send contexts are frozen. Notify user space */
6734 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6735
6736 if (flags & FREEZE_ABORT) {
6737 dd_dev_err(dd,
6738 "Aborted freeze recovery. Please REBOOT system\n");
6739 return;
6740 }
6741 /* queue non-interrupt handler */
6742 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6743}
6744
6745/*
6746 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6747 * depending on the "freeze" parameter.
6748 *
 6749 * No need to return an error if it times out; our only option
6750 * is to proceed anyway.
6751 */
6752static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6753{
6754 unsigned long timeout;
6755 u64 reg;
6756
6757 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6758 while (1) {
6759 reg = read_csr(dd, CCE_STATUS);
6760 if (freeze) {
6761 /* waiting until all indicators are set */
6762 if ((reg & ALL_FROZE) == ALL_FROZE)
6763 return; /* all done */
6764 } else {
6765 /* waiting until all indicators are clear */
6766 if ((reg & ALL_FROZE) == 0)
6767 return; /* all done */
6768 }
6769
6770 if (time_after(jiffies, timeout)) {
6771 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006772 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6773 freeze ? "" : "un", reg & ALL_FROZE,
6774 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006775 return;
6776 }
6777 usleep_range(80, 120);
6778 }
6779}
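
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * bounded polling loop used by wait_for_freeze_status(), with the CSR read
 * replaced by a hypothetical condition callback.  time_after() copes with
 * jiffies wrap-around and usleep_range() keeps the poll from spinning.
 */
static inline void example_poll_until(bool (*done)(void), u32 timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (!done()) {
		if (time_after(jiffies, timeout))
			return;	/* timed out; the caller proceeds anyway */
		usleep_range(80, 120);
	}
}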
6780
6781/*
6782 * Do all freeze handling for the RXE block.
6783 */
6784static void rxe_freeze(struct hfi1_devdata *dd)
6785{
6786 int i;
6787
6788 /* disable port */
6789 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6790
6791 /* disable all receive contexts */
6792 for (i = 0; i < dd->num_rcv_contexts; i++)
Michael J. Ruhl22505632017-07-24 07:46:06 -07006793 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, dd->rcd[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006794}
6795
6796/*
6797 * Unfreeze handling for the RXE block - kernel contexts only.
6798 * This will also enable the port. User contexts will do unfreeze
6799 * handling on a per-context basis as they call into the driver.
6800 *
6801 */
6802static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6803{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006804 u32 rcvmask;
Michael J. Ruhle6f76222017-07-24 07:45:55 -07006805 u16 i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006806
6807 /* enable all kernel contexts */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07006808 for (i = 0; i < dd->num_rcv_contexts; i++) {
6809 struct hfi1_ctxtdata *rcd = dd->rcd[i];
6810
6811 /* Ensure all non-user contexts(including vnic) are enabled */
6812 if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER))
6813 continue;
6814
Mitko Haralanov566c1572016-02-03 14:32:49 -08006815 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6816 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
Michael J. Ruhl22505632017-07-24 07:46:06 -07006817 rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
Mitko Haralanov566c1572016-02-03 14:32:49 -08006818 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
Michael J. Ruhl22505632017-07-24 07:46:06 -07006819 hfi1_rcvctrl(dd, rcvmask, rcd);
Mitko Haralanov566c1572016-02-03 14:32:49 -08006820 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006821
6822 /* enable port */
6823 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6824}
6825
6826/*
6827 * Non-interrupt SPC freeze handling.
6828 *
6829 * This is a work-queue function outside of the triggering interrupt.
6830 */
6831void handle_freeze(struct work_struct *work)
6832{
6833 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6834 freeze_work);
6835 struct hfi1_devdata *dd = ppd->dd;
6836
6837 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006838 wait_for_freeze_status(dd, 1);
6839
6840 /* SPC is now frozen */
6841
6842 /* do send PIO freeze steps */
6843 pio_freeze(dd);
6844
6845 /* do send DMA freeze steps */
6846 sdma_freeze(dd);
6847
6848 /* do send egress freeze steps - nothing to do */
6849
6850 /* do receive freeze steps */
6851 rxe_freeze(dd);
6852
6853 /*
6854 * Unfreeze the hardware - clear the freeze, wait for each
6855 * block's frozen bit to clear, then clear the frozen flag.
6856 */
6857 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6858 wait_for_freeze_status(dd, 0);
6859
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006860 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006861 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6862 wait_for_freeze_status(dd, 1);
6863 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6864 wait_for_freeze_status(dd, 0);
6865 }
6866
6867 /* do send PIO unfreeze steps for kernel contexts */
6868 pio_kernel_unfreeze(dd);
6869
6870 /* do send DMA unfreeze steps */
6871 sdma_unfreeze(dd);
6872
6873 /* do send egress unfreeze steps - nothing to do */
6874
6875 /* do receive unfreeze steps for kernel contexts */
6876 rxe_kernel_unfreeze(dd);
6877
6878 /*
6879 * The unfreeze procedure touches global device registers when
6880 * it disables and re-enables RXE. Mark the device unfrozen
6881 * after all that is done so other parts of the driver waiting
6882 * for the device to unfreeze don't do things out of order.
6883 *
6884 * The above implies that the meaning of HFI1_FROZEN flag is
6885 * "Device has gone into freeze mode and freeze mode handling
6886 * is still in progress."
6887 *
6888 * The flag will be removed when freeze mode processing has
6889 * completed.
6890 */
6891 dd->flags &= ~HFI1_FROZEN;
6892 wake_up(&dd->event_queue);
6893
6894 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006895}
6896
6897/*
6898 * Handle a link up interrupt from the 8051.
6899 *
6900 * This is a work-queue function outside of the interrupt.
6901 */
6902void handle_link_up(struct work_struct *work)
6903{
6904 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006905 link_up_work);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006906 struct hfi1_devdata *dd = ppd->dd;
6907
Mike Marciniszyn77241052015-07-30 15:17:43 -04006908 set_link_state(ppd, HLS_UP_INIT);
6909
6910 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006911 read_ltp_rtt(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006912 /*
6913 * OPA specifies that certain counters are cleared on a transition
6914 * to link up, so do that.
6915 */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006916 clear_linkup_counters(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006917 /*
6918 * And (re)set link up default values.
6919 */
6920 set_linkup_defaults(ppd);
6921
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006922 /*
6923 * Set VL15 credits. Use cached value from verify cap interrupt.
6924 * In case of quick linkup or simulator, vl15 value will be set by
6925 * handle_linkup_change. VerifyCap interrupt handler will not be
6926 * called in those scenarios.
6927 */
6928 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6929 set_up_vl15(dd, dd->vl15buf_cached);
6930
Mike Marciniszyn77241052015-07-30 15:17:43 -04006931 /* enforce link speed enabled */
6932 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6933 /* oops - current speed is not enabled, bounce */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006934 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006935 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6936 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006937 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006938 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006939 set_link_state(ppd, HLS_DN_OFFLINE);
6940 start_link(ppd);
6941 }
6942}
6943
Jubin John4d114fd2016-02-14 20:21:43 -08006944/*
6945 * Several pieces of LNI information were cached for SMA in ppd.
6946 * Reset these on link down
6947 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006948static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6949{
6950 ppd->neighbor_guid = 0;
6951 ppd->neighbor_port_number = 0;
6952 ppd->neighbor_type = 0;
6953 ppd->neighbor_fm_security = 0;
6954}
6955
Dean Luickfeb831d2016-04-14 08:31:36 -07006956static const char * const link_down_reason_strs[] = {
6957 [OPA_LINKDOWN_REASON_NONE] = "None",
Dennis Dalessandro67838e62017-05-29 17:18:46 -07006958 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
Dean Luickfeb831d2016-04-14 08:31:36 -07006959 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6960 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6961 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6962 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6963 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6964 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6965 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6966 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6967 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6968 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6969 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6970 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6971 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6972 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6973 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6974 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6975 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6976 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6977 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6978 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6979 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6980 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6981 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6982 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6983 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6984 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6985 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6986 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6987 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6988 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6989 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6990 "Excessive buffer overrun",
6991 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6992 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6993 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6994 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6995 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6996 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6997 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6998 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6999 "Local media not installed",
7000 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7001 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7002 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7003 "End to end not installed",
7004 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7005 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7006 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7007 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7008 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7009 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7010};
7011
7012/* return the neighbor link down reason string */
7013static const char *link_down_reason_str(u8 reason)
7014{
7015 const char *str = NULL;
7016
7017 if (reason < ARRAY_SIZE(link_down_reason_strs))
7018 str = link_down_reason_strs[reason];
7019 if (!str)
7020 str = "(invalid)";
7021
7022 return str;
7023}
7024
Mike Marciniszyn77241052015-07-30 15:17:43 -04007025/*
7026 * Handle a link down interrupt from the 8051.
7027 *
7028 * This is a work-queue function outside of the interrupt.
7029 */
7030void handle_link_down(struct work_struct *work)
7031{
7032 u8 lcl_reason, neigh_reason = 0;
Dean Luickfeb831d2016-04-14 08:31:36 -07007033 u8 link_down_reason;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007034 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Dean Luickfeb831d2016-04-14 08:31:36 -07007035 link_down_work);
7036 int was_up;
7037 static const char ldr_str[] = "Link down reason: ";
Mike Marciniszyn77241052015-07-30 15:17:43 -04007038
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007039 if ((ppd->host_link_state &
7040 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7041 ppd->port_type == PORT_TYPE_FIXED)
7042 ppd->offline_disabled_reason =
7043 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7044
7045 /* Go offline first, then deal with reading/writing through 8051 */
Dean Luickfeb831d2016-04-14 08:31:36 -07007046 was_up = !!(ppd->host_link_state & HLS_UP);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007047 set_link_state(ppd, HLS_DN_OFFLINE);
Sebastian Sanchez626c0772017-07-29 08:43:55 -07007048 xchg(&ppd->is_link_down_queued, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007049
Dean Luickfeb831d2016-04-14 08:31:36 -07007050 if (was_up) {
7051 lcl_reason = 0;
7052 /* link down reason is only valid if the link was up */
7053 read_link_down_reason(ppd->dd, &link_down_reason);
7054 switch (link_down_reason) {
7055 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7056 /* the link went down, no idle message reason */
7057 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7058 ldr_str);
7059 break;
7060 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7061 /*
7062 * The neighbor reason is only valid if an idle message
7063 * was received for it.
7064 */
7065 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7066 dd_dev_info(ppd->dd,
7067 "%sNeighbor link down message %d, %s\n",
7068 ldr_str, neigh_reason,
7069 link_down_reason_str(neigh_reason));
7070 break;
7071 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7072 dd_dev_info(ppd->dd,
7073 "%sHost requested link to go offline\n",
7074 ldr_str);
7075 break;
7076 default:
7077 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7078 ldr_str, link_down_reason);
7079 break;
7080 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007081
Dean Luickfeb831d2016-04-14 08:31:36 -07007082 /*
7083 * If no reason, assume peer-initiated but missed
7084 * LinkGoingDown idle flits.
7085 */
7086 if (neigh_reason == 0)
7087 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7088 } else {
7089 /* went down while polling or going up */
7090 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7091 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007092
7093 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7094
Dean Luick015e91f2016-04-14 08:31:42 -07007095 /* inform the SMA when the link transitions from up to down */
7096 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7097 ppd->neigh_link_down_reason.sma == 0) {
7098 ppd->local_link_down_reason.sma =
7099 ppd->local_link_down_reason.latest;
7100 ppd->neigh_link_down_reason.sma =
7101 ppd->neigh_link_down_reason.latest;
7102 }
7103
Mike Marciniszyn77241052015-07-30 15:17:43 -04007104 reset_neighbor_info(ppd);
7105
7106 /* disable the port */
7107 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7108
Jubin John4d114fd2016-02-14 20:21:43 -08007109 /*
7110 * If there is no cable attached, turn the DC off. Otherwise,
7111 * start the link bring up.
7112 */
Dean Luick0db9dec2016-09-06 04:35:20 -07007113 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04007114 dc_shutdown(ppd->dd);
Dean Luick0db9dec2016-09-06 04:35:20 -07007115 else
Mike Marciniszyn77241052015-07-30 15:17:43 -04007116 start_link(ppd);
7117}
7118
7119void handle_link_bounce(struct work_struct *work)
7120{
7121 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7122 link_bounce_work);
7123
7124 /*
7125 * Only do something if the link is currently up.
7126 */
7127 if (ppd->host_link_state & HLS_UP) {
7128 set_link_state(ppd, HLS_DN_OFFLINE);
7129 start_link(ppd);
7130 } else {
7131 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007132 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007133 }
7134}
7135
7136/*
7137 * Mask conversion: Capability exchange to Port LTP. The capability
7138 * exchange has an implicit 16b CRC that is mandatory.
7139 */
7140static int cap_to_port_ltp(int cap)
7141{
7142 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7143
7144 if (cap & CAP_CRC_14B)
7145 port_ltp |= PORT_LTP_CRC_MODE_14;
7146 if (cap & CAP_CRC_48B)
7147 port_ltp |= PORT_LTP_CRC_MODE_48;
7148 if (cap & CAP_CRC_12B_16B_PER_LANE)
7149 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7150
7151 return port_ltp;
7152}
7153
7154/*
7155 * Convert an OPA Port LTP mask to capability mask
7156 */
7157int port_ltp_to_cap(int port_ltp)
7158{
7159 int cap_mask = 0;
7160
7161 if (port_ltp & PORT_LTP_CRC_MODE_14)
7162 cap_mask |= CAP_CRC_14B;
7163 if (port_ltp & PORT_LTP_CRC_MODE_48)
7164 cap_mask |= CAP_CRC_48B;
7165 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7166 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7167
7168 return cap_mask;
7169}
7170
7171/*
7172 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7173 */
7174static int lcb_to_port_ltp(int lcb_crc)
7175{
7176 int port_ltp = 0;
7177
7178 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7179 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7180 else if (lcb_crc == LCB_CRC_48B)
7181 port_ltp = PORT_LTP_CRC_MODE_48;
7182 else if (lcb_crc == LCB_CRC_14B)
7183 port_ltp = PORT_LTP_CRC_MODE_14;
7184 else
7185 port_ltp = PORT_LTP_CRC_MODE_16;
7186
7187 return port_ltp;
7188}
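
/*
 * Illustrative sketch (editorial addition, not part of the driver): how the
 * two mask conversions above relate.  cap_to_port_ltp() always includes
 * PORT_LTP_CRC_MODE_16 because the 16b CRC is mandatory, and
 * port_ltp_to_cap() recovers exactly the optional CRC capability bits that
 * were fed in.  The helper name is hypothetical.
 */
static inline bool example_crc_caps_round_trip(int cap)
{
	int optional = cap & (CAP_CRC_14B | CAP_CRC_48B |
			      CAP_CRC_12B_16B_PER_LANE);

	return port_ltp_to_cap(cap_to_port_ltp(cap)) == optional;
}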
7189
7190/*
7191 * Our neighbor has indicated that we are allowed to act as a fabric
7192 * manager, so place the full management partition key in the second
7193 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7194 * that we should already have the limited management partition key in
7195 * array element 1, and also that the port is not yet up when
7196 * add_full_mgmt_pkey() is invoked.
7197 */
7198static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7199{
7200 struct hfi1_devdata *dd = ppd->dd;
7201
Dennis Dalessandroa498fbc2017-04-09 10:17:06 -07007202 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
Dean Luick87645222015-12-01 15:38:21 -05007203 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7204 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7205 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007206 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7207 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007208 hfi1_event_pkey_change(ppd->dd, ppd->port);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007209}
7210
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007211static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007212{
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007213 if (ppd->pkeys[2] != 0) {
7214 ppd->pkeys[2] = 0;
7215 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007216 hfi1_event_pkey_change(ppd->dd, ppd->port);
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007217 }
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007218}
7219
Mike Marciniszyn77241052015-07-30 15:17:43 -04007220/*
7221 * Convert the given link width to the OPA link width bitmask.
7222 */
7223static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7224{
7225 switch (width) {
7226 case 0:
7227 /*
7228 * Simulator and quick linkup do not set the width.
7229 * Just set it to 4x without complaint.
7230 */
7231 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7232 return OPA_LINK_WIDTH_4X;
7233 return 0; /* no lanes up */
7234 case 1: return OPA_LINK_WIDTH_1X;
7235 case 2: return OPA_LINK_WIDTH_2X;
7236 case 3: return OPA_LINK_WIDTH_3X;
7237 default:
7238 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007239 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007240 /* fall through */
7241 case 4: return OPA_LINK_WIDTH_4X;
7242 }
7243}
7244
7245/*
7246 * Do a population count on the bottom nibble.
7247 */
7248static const u8 bit_counts[16] = {
7249 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7250};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007251
Mike Marciniszyn77241052015-07-30 15:17:43 -04007252static inline u8 nibble_to_count(u8 nibble)
7253{
7254 return bit_counts[nibble & 0xf];
7255}
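
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * table above is a 4-bit population count.  An enable_lane mask of 0xb
 * (binary 1011) means three active lanes, and bit_counts[0xb] is 3.  The
 * hypothetical helper below only restates that nibble_to_count() agrees
 * with the generic hweight8() on the low nibble.
 */
static inline bool example_nibble_count_matches_hweight(u8 nibble)
{
	return nibble_to_count(nibble) == hweight8(nibble & 0xf);
}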
7256
7257/*
7258 * Read the active lane information from the 8051 registers and return
7259 * their widths.
7260 *
7261 * Active lane information is found in these 8051 registers:
7262 * enable_lane_tx
7263 * enable_lane_rx
7264 */
7265static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7266 u16 *rx_width)
7267{
7268 u16 tx, rx;
7269 u8 enable_lane_rx;
7270 u8 enable_lane_tx;
7271 u8 tx_polarity_inversion;
7272 u8 rx_polarity_inversion;
7273 u8 max_rate;
7274
7275 /* read the active lanes */
7276 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007277 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007278 read_local_lni(dd, &enable_lane_rx);
7279
7280 /* convert to counts */
7281 tx = nibble_to_count(enable_lane_tx);
7282 rx = nibble_to_count(enable_lane_rx);
7283
7284 /*
7285 * Set link_speed_active here, overriding what was set in
7286 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7287 * set the max_rate field in handle_verify_cap until v0.19.
7288 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007289 if ((dd->icode == ICODE_RTL_SILICON) &&
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07007290 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007291 /* max_rate: 0 = 12.5G, 1 = 25G */
7292 switch (max_rate) {
7293 case 0:
7294 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7295 break;
7296 default:
7297 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007298 "%s: unexpected max rate %d, using 25Gb\n",
7299 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007300 /* fall through */
7301 case 1:
7302 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7303 break;
7304 }
7305 }
7306
7307 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007308 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7309 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007310 *tx_width = link_width_to_bits(dd, tx);
7311 *rx_width = link_width_to_bits(dd, rx);
7312}
7313
7314/*
7315 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7316 * Valid after the end of VerifyCap and during LinkUp. Does not change
7317 * after link up. I.e. look elsewhere for downgrade information.
7318 *
7319 * Bits are:
7320 * + bits [7:4] contain the number of active transmitters
7321 * + bits [3:0] contain the number of active receivers
7322 * These are numbers 1 through 4 and can be different values if the
7323 * link is asymmetric.
7324 *
7325 * verify_cap_local_fm_link_width[0] retains its original value.
7326 */
7327static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7328 u16 *rx_width)
7329{
7330 u16 widths, tx, rx;
7331 u8 misc_bits, local_flags;
7332 u16 active_tx, active_rx;
7333
7334 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7335 tx = widths >> 12;
7336 rx = (widths >> 8) & 0xf;
7337
7338 *tx_width = link_width_to_bits(dd, tx);
7339 *rx_width = link_width_to_bits(dd, rx);
7340
7341 /* print the active widths */
7342 get_link_widths(dd, &active_tx, &active_rx);
7343}
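
/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * decode performed by get_linkup_widths() on the widths word.  With
 * widths == 0x4300, bits [15:12] give 4 active transmitters and bits
 * [11:8] give 3 active receivers, i.e. an asymmetric link.  The helper
 * name is hypothetical.
 */
static inline void example_decode_linkup_widths(u16 widths, u16 *tx, u16 *rx)
{
	*tx = widths >> 12;		/* active transmitter count */
	*rx = (widths >> 8) & 0xf;	/* active receiver count */
}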
7344
7345/*
7346 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7347 * hardware information when the link first comes up.
7348 *
7349 * The link width is not available until after VerifyCap.AllFramesReceived
7350 * (the trigger for handle_verify_cap), so this is outside that routine
7351 * and should be called when the 8051 signals linkup.
7352 */
7353void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7354{
7355 u16 tx_width, rx_width;
7356
7357 /* get end-of-LNI link widths */
7358 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7359
7360 /* use tx_width as the link is supposed to be symmetric on link up */
7361 ppd->link_width_active = tx_width;
7362 /* link width downgrade active (LWD.A) starts out matching LW.A */
7363 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7364 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7365 /* per OPA spec, on link up LWD.E resets to LWD.S */
7366 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
 7367 /* cache the active egress rate (units of 10^6 bits/sec) */
7368 ppd->current_egress_rate = active_egress_rate(ppd);
7369}
7370
7371/*
7372 * Handle a verify capabilities interrupt from the 8051.
7373 *
7374 * This is a work-queue function outside of the interrupt.
7375 */
7376void handle_verify_cap(struct work_struct *work)
7377{
7378 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7379 link_vc_work);
7380 struct hfi1_devdata *dd = ppd->dd;
7381 u64 reg;
7382 u8 power_management;
7383 u8 continious;
7384 u8 vcu;
7385 u8 vau;
7386 u8 z;
7387 u16 vl15buf;
7388 u16 link_widths;
7389 u16 crc_mask;
7390 u16 crc_val;
7391 u16 device_id;
7392 u16 active_tx, active_rx;
7393 u8 partner_supported_crc;
7394 u8 remote_tx_rate;
7395 u8 device_rev;
7396
7397 set_link_state(ppd, HLS_VERIFY_CAP);
7398
7399 lcb_shutdown(dd, 0);
7400 adjust_lcb_for_fpga_serdes(dd);
7401
Mike Marciniszyn77241052015-07-30 15:17:43 -04007402 read_vc_remote_phy(dd, &power_management, &continious);
Jubin John17fb4f22016-02-14 20:21:52 -08007403 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7404 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007405 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7406 read_remote_device_id(dd, &device_id, &device_rev);
7407 /*
7408 * And the 'MgmtAllowed' information, which is exchanged during
 7409	 * LNI, is also available at this point.
7410 */
7411 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7412 /* print the active widths */
7413 get_link_widths(dd, &active_tx, &active_rx);
7414 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007415 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7416 (int)power_management, (int)continious);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007417 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007418 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7419 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7420 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007421 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007422 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007423 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007424 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007425 /*
7426 * The peer vAU value just read is the peer receiver value. HFI does
7427 * not support a transmit vAU of 0 (AU == 8). We advertised that
7428 * with Z=1 in the fabric capabilities sent to the peer. The peer
7429 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7430 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7431 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7432 * subject to the Z value exception.
7433 */
7434 if (vau == 0)
7435 vau = 1;
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07007436 set_up_vau(dd, vau);
7437
7438 /*
7439 * Set VL15 credits to 0 in global credit register. Cache remote VL15
 7440	 * credits value and wait for link-up interrupt to set it.
7441 */
7442 set_up_vl15(dd, 0);
7443 dd->vl15buf_cached = vl15buf;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007444
7445 /* set up the LCB CRC mode */
7446 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7447
7448 /* order is important: use the lowest bit in common */
7449 if (crc_mask & CAP_CRC_14B)
7450 crc_val = LCB_CRC_14B;
7451 else if (crc_mask & CAP_CRC_48B)
7452 crc_val = LCB_CRC_48B;
7453 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7454 crc_val = LCB_CRC_12B_16B_PER_LANE;
7455 else
7456 crc_val = LCB_CRC_16B;
7457
7458 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7459 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7460 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7461
7462 /* set (14b only) or clear sideband credit */
7463 reg = read_csr(dd, SEND_CM_CTRL);
7464 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7465 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007466 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007467 } else {
7468 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007469 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007470 }
7471
7472 ppd->link_speed_active = 0; /* invalid value */
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07007473 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007474 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7475 switch (remote_tx_rate) {
7476 case 0:
7477 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7478 break;
7479 case 1:
7480 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7481 break;
7482 }
7483 } else {
7484 /* actual rate is highest bit of the ANDed rates */
7485 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7486
7487 if (rate & 2)
7488 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7489 else if (rate & 1)
7490 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7491 }
7492 if (ppd->link_speed_active == 0) {
7493 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007494 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007495 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7496 }
7497
7498 /*
7499 * Cache the values of the supported, enabled, and active
7500 * LTP CRC modes to return in 'portinfo' queries. But the bit
7501 * flags that are returned in the portinfo query differ from
7502 * what's in the link_crc_mask, crc_sizes, and crc_val
7503 * variables. Convert these here.
7504 */
7505 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7506 /* supported crc modes */
7507 ppd->port_ltp_crc_mode |=
7508 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7509 /* enabled crc modes */
7510 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7511 /* active crc mode */
7512
7513 /* set up the remote credit return table */
7514 assign_remote_cm_au_table(dd, vcu);
7515
7516 /*
7517 * The LCB is reset on entry to handle_verify_cap(), so this must
7518 * be applied on every link up.
7519 *
7520 * Adjust LCB error kill enable to kill the link if
7521 * these RBUF errors are seen:
7522 * REPLAY_BUF_MBE_SMASK
7523 * FLIT_INPUT_BUF_MBE_SMASK
7524 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007525 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007526 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7527 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7528 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7529 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7530 }
7531
7532 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7533 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7534
7535 /* give 8051 access to the LCB CSRs */
7536 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7537 set_8051_lcb_access(dd);
7538
Mike Marciniszyn77241052015-07-30 15:17:43 -04007539 if (ppd->mgmt_allowed)
7540 add_full_mgmt_pkey(ppd);
7541
7542 /* tell the 8051 to go to LinkUp */
7543 set_link_state(ppd, HLS_GOING_UP);
7544}
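
/*
 * Illustrative sketch (not driver code): the LCB CRC negotiation above
 * intersects the locally enabled CRC capability bits with what the peer
 * advertised, then picks one LCB mode in a fixed priority order, falling
 * back to 16B when nothing else is in common.  The helper name is
 * hypothetical; the CAP_/LCB_ names are the ones used above:
 *
 *	static u16 pick_lcb_crc(u16 local_enabled, u16 peer_supported)
 *	{
 *		u16 common = local_enabled & peer_supported;
 *
 *		if (common & CAP_CRC_14B)
 *			return LCB_CRC_14B;
 *		if (common & CAP_CRC_48B)
 *			return LCB_CRC_48B;
 *		if (common & CAP_CRC_12B_16B_PER_LANE)
 *			return LCB_CRC_12B_16B_PER_LANE;
 *		return LCB_CRC_16B;
 *	}
 */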
7545
7546/*
7547 * Apply the link width downgrade enabled policy against the current active
7548 * link widths.
7549 *
7550 * Called when the enabled policy changes or the active link widths change.
7551 */
7552void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7553{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007554 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007555 int tries;
7556 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007557 u16 tx, rx;
7558
Dean Luick323fd782015-11-16 21:59:24 -05007559 /* use the hls lock to avoid a race with actual link up */
7560 tries = 0;
7561retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007562 mutex_lock(&ppd->hls_lock);
7563 /* only apply if the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07007564 if (ppd->host_link_state & HLS_DOWN) {
Dean Luick323fd782015-11-16 21:59:24 -05007565	/* still going up... wait and retry */
7566 if (ppd->host_link_state & HLS_GOING_UP) {
7567 if (++tries < 1000) {
7568 mutex_unlock(&ppd->hls_lock);
7569 usleep_range(100, 120); /* arbitrary */
7570 goto retry;
7571 }
7572 dd_dev_err(ppd->dd,
7573 "%s: giving up waiting for link state change\n",
7574 __func__);
7575 }
7576 goto done;
7577 }
7578
7579 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007580
7581 if (refresh_widths) {
7582 get_link_widths(ppd->dd, &tx, &rx);
7583 ppd->link_width_downgrade_tx_active = tx;
7584 ppd->link_width_downgrade_rx_active = rx;
7585 }
7586
Dean Luickf9b56352016-04-14 08:31:30 -07007587 if (ppd->link_width_downgrade_tx_active == 0 ||
7588 ppd->link_width_downgrade_rx_active == 0) {
7589 /* the 8051 reported a dead link as a downgrade */
7590 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7591 } else if (lwde == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007592 /* downgrade is disabled */
7593
7594 /* bounce if not at starting active width */
7595 if ((ppd->link_width_active !=
Jubin John17fb4f22016-02-14 20:21:52 -08007596 ppd->link_width_downgrade_tx_active) ||
7597 (ppd->link_width_active !=
7598 ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007599 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007600 "Link downgrade is disabled and link has downgraded, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007601 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007602 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7603 ppd->link_width_active,
7604 ppd->link_width_downgrade_tx_active,
7605 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007606 do_bounce = 1;
7607 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007608 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7609 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007610 /* Tx or Rx is outside the enabled policy */
7611 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007612 "Link is outside of downgrade allowed, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007613 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007614 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7615 lwde, ppd->link_width_downgrade_tx_active,
7616 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007617 do_bounce = 1;
7618 }
7619
Dean Luick323fd782015-11-16 21:59:24 -05007620done:
7621 mutex_unlock(&ppd->hls_lock);
7622
Mike Marciniszyn77241052015-07-30 15:17:43 -04007623 if (do_bounce) {
7624 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08007625 OPA_LINKDOWN_REASON_WIDTH_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007626 set_link_state(ppd, HLS_DN_OFFLINE);
7627 start_link(ppd);
7628 }
7629}
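
/*
 * Illustrative sketch (not driver code): the downgrade policy decision
 * above reduces to a predicate on the enabled-width mask (lwde) and the
 * currently active tx/rx width bits.  The helper name is hypothetical:
 *
 *	static int width_policy_bounce(u16 lwde, u16 orig_active,
 *				       u16 tx_active, u16 rx_active)
 *	{
 *		if (tx_active == 0 || rx_active == 0)
 *			return 0;	// really a link down, only reported
 *		if (lwde == 0)		// downgrade disabled: any change bounces
 *			return tx_active != orig_active ||
 *			       rx_active != orig_active;
 *		// downgrade enabled: both directions must stay inside the mask
 *		return !(lwde & tx_active) || !(lwde & rx_active);
 *	}
 */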
7630
7631/*
7632 * Handle a link downgrade interrupt from the 8051.
7633 *
7634 * This is a work-queue function outside of the interrupt.
7635 */
7636void handle_link_downgrade(struct work_struct *work)
7637{
7638 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7639 link_downgrade_work);
7640
7641 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7642 apply_link_downgrade_policy(ppd, 1);
7643}
7644
7645static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7646{
7647 return flag_string(buf, buf_len, flags, dcc_err_flags,
7648 ARRAY_SIZE(dcc_err_flags));
7649}
7650
7651static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7652{
7653 return flag_string(buf, buf_len, flags, lcb_err_flags,
7654 ARRAY_SIZE(lcb_err_flags));
7655}
7656
7657static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7658{
7659 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7660 ARRAY_SIZE(dc8051_err_flags));
7661}
7662
7663static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7664{
7665 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7666 ARRAY_SIZE(dc8051_info_err_flags));
7667}
7668
7669static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7670{
7671 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7672 ARRAY_SIZE(dc8051_info_host_msg_flags));
7673}
7674
7675static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7676{
7677 struct hfi1_pportdata *ppd = dd->pport;
7678 u64 info, err, host_msg;
7679 int queue_link_down = 0;
7680 char buf[96];
7681
7682 /* look at the flags */
7683 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7684 /* 8051 information set by firmware */
7685 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7686 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7687 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7688 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7689 host_msg = (info >>
7690 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7691 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7692
7693 /*
7694 * Handle error flags.
7695 */
7696 if (err & FAILED_LNI) {
7697 /*
7698 * LNI error indications are cleared by the 8051
7699 * only when starting polling. Only pay attention
7700 * to them when in the states that occur during
7701 * LNI.
7702 */
7703 if (ppd->host_link_state
7704 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7705 queue_link_down = 1;
7706 dd_dev_info(dd, "Link error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007707 dc8051_info_err_string(buf,
7708 sizeof(buf),
7709 err &
7710 FAILED_LNI));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007711 }
7712 err &= ~(u64)FAILED_LNI;
7713 }
Dean Luick6d014532015-12-01 15:38:23 -05007714		/* unknown frames can happen during LNI, just count */
7715 if (err & UNKNOWN_FRAME) {
7716 ppd->unknown_frame_count++;
7717 err &= ~(u64)UNKNOWN_FRAME;
7718 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007719 if (err) {
7720 /* report remaining errors, but do not do anything */
7721 dd_dev_err(dd, "8051 info error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007722 dc8051_info_err_string(buf, sizeof(buf),
7723 err));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007724 }
7725
7726 /*
7727 * Handle host message flags.
7728 */
7729 if (host_msg & HOST_REQ_DONE) {
7730 /*
7731 * Presently, the driver does a busy wait for
7732 * host requests to complete. This is only an
7733 * informational message.
7734 * NOTE: The 8051 clears the host message
7735 * information *on the next 8051 command*.
7736 * Therefore, when linkup is achieved,
7737 * this flag will still be set.
7738 */
7739 host_msg &= ~(u64)HOST_REQ_DONE;
7740 }
7741 if (host_msg & BC_SMA_MSG) {
Sebastian Sanchez71d47002017-07-29 08:43:49 -07007742 queue_work(ppd->link_wq, &ppd->sma_message_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007743 host_msg &= ~(u64)BC_SMA_MSG;
7744 }
7745 if (host_msg & LINKUP_ACHIEVED) {
7746 dd_dev_info(dd, "8051: Link up\n");
Sebastian Sanchez71d47002017-07-29 08:43:49 -07007747 queue_work(ppd->link_wq, &ppd->link_up_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007748 host_msg &= ~(u64)LINKUP_ACHIEVED;
7749 }
7750 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07007751 handle_8051_request(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007752 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7753 }
7754 if (host_msg & VERIFY_CAP_FRAME) {
Sebastian Sanchez71d47002017-07-29 08:43:49 -07007755 queue_work(ppd->link_wq, &ppd->link_vc_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007756 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7757 }
7758 if (host_msg & LINK_GOING_DOWN) {
7759 const char *extra = "";
7760 /* no downgrade action needed if going down */
7761 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7762 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7763 extra = " (ignoring downgrade)";
7764 }
7765 dd_dev_info(dd, "8051: Link down%s\n", extra);
7766 queue_link_down = 1;
7767 host_msg &= ~(u64)LINK_GOING_DOWN;
7768 }
7769 if (host_msg & LINK_WIDTH_DOWNGRADED) {
Sebastian Sanchez71d47002017-07-29 08:43:49 -07007770 queue_work(ppd->link_wq, &ppd->link_downgrade_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007771 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7772 }
7773 if (host_msg) {
7774 /* report remaining messages, but do not do anything */
7775 dd_dev_info(dd, "8051 info host message: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007776 dc8051_info_host_msg_string(buf,
7777 sizeof(buf),
7778 host_msg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007779 }
7780
7781 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7782 }
7783 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7784 /*
7785 * Lost the 8051 heartbeat. If this happens, we
7786 * receive constant interrupts about it. Disable
7787 * the interrupt after the first.
7788 */
7789 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7790 write_csr(dd, DC_DC8051_ERR_EN,
Jubin John17fb4f22016-02-14 20:21:52 -08007791 read_csr(dd, DC_DC8051_ERR_EN) &
7792 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007793
7794 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7795 }
7796 if (reg) {
7797 /* report the error, but do not do anything */
7798 dd_dev_err(dd, "8051 error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007799 dc8051_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007800 }
7801
7802 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007803 /*
7804 * if the link is already going down or disabled, do not
7805 * queue another
7806 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007807 if ((ppd->host_link_state &
7808 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
Sebastian Sanchez626c0772017-07-29 08:43:55 -07007809 ppd->link_enabled == 0 || ppd->is_link_down_queued) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007810 dd_dev_info(dd, "%s: not queuing link down\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007811 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007812 } else {
Sebastian Sanchez626c0772017-07-29 08:43:55 -07007813 xchg(&ppd->is_link_down_queued, 1);
Sebastian Sanchez71d47002017-07-29 08:43:49 -07007814 queue_work(ppd->link_wq, &ppd->link_down_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007815 }
7816 }
7817}
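
/*
 * Illustrative sketch (not driver code): handle_8051_interrupt() follows a
 * consume-and-clear pattern for every field it decodes from the
 * SET_BY_8051 info value - act on a known bit, strip it, and report
 * whatever is left at the end.  The helper below is hypothetical and only
 * shows the shape of that pattern:
 *
 *	static u64 consume_flag(u64 flags, u64 bit, void (*act)(void))
 *	{
 *		if (flags & bit) {
 *			act();
 *			flags &= ~bit;
 *		}
 *		return flags;
 *	}
 *
 * After all known bits are consumed, a non-zero remainder means an
 * unrecognized flag that is only logged, never acted upon.
 */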
7818
7819static const char * const fm_config_txt[] = {
7820[0] =
7821 "BadHeadDist: Distance violation between two head flits",
7822[1] =
7823 "BadTailDist: Distance violation between two tail flits",
7824[2] =
7825 "BadCtrlDist: Distance violation between two credit control flits",
7826[3] =
7827 "BadCrdAck: Credits return for unsupported VL",
7828[4] =
7829 "UnsupportedVLMarker: Received VL Marker",
7830[5] =
7831 "BadPreempt: Exceeded the preemption nesting level",
7832[6] =
7833 "BadControlFlit: Received unsupported control flit",
7834/* no 7 */
7835[8] =
7836 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7837};
7838
7839static const char * const port_rcv_txt[] = {
7840[1] =
7841 "BadPktLen: Illegal PktLen",
7842[2] =
7843 "PktLenTooLong: Packet longer than PktLen",
7844[3] =
7845 "PktLenTooShort: Packet shorter than PktLen",
7846[4] =
7847 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7848[5] =
7849 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7850[6] =
7851 "BadL2: Illegal L2 opcode",
7852[7] =
7853 "BadSC: Unsupported SC",
7854[9] =
7855 "BadRC: Illegal RC",
7856[11] =
7857 "PreemptError: Preempting with same VL",
7858[12] =
7859 "PreemptVL15: Preempting a VL15 packet",
7860};
7861
7862#define OPA_LDR_FMCONFIG_OFFSET 16
7863#define OPA_LDR_PORTRCV_OFFSET 0
7864static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7865{
7866 u64 info, hdr0, hdr1;
7867 const char *extra;
7868 char buf[96];
7869 struct hfi1_pportdata *ppd = dd->pport;
7870 u8 lcl_reason = 0;
7871 int do_bounce = 0;
7872
7873 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7874 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7875 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7876 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7877 /* set status bit */
7878 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7879 }
7880 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7881 }
7882
7883 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7884 struct hfi1_pportdata *ppd = dd->pport;
7885 /* this counter saturates at (2^32) - 1 */
7886 if (ppd->link_downed < (u32)UINT_MAX)
7887 ppd->link_downed++;
7888 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7889 }
7890
7891 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7892 u8 reason_valid = 1;
7893
7894 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7895 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7896 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7897 /* set status bit */
7898 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7899 }
7900 switch (info) {
7901 case 0:
7902 case 1:
7903 case 2:
7904 case 3:
7905 case 4:
7906 case 5:
7907 case 6:
7908 extra = fm_config_txt[info];
7909 break;
7910 case 8:
7911 extra = fm_config_txt[info];
7912 if (ppd->port_error_action &
7913 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7914 do_bounce = 1;
7915 /*
7916 * lcl_reason cannot be derived from info
7917 * for this error
7918 */
7919 lcl_reason =
7920 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7921 }
7922 break;
7923 default:
7924 reason_valid = 0;
7925 snprintf(buf, sizeof(buf), "reserved%lld", info);
7926 extra = buf;
7927 break;
7928 }
7929
7930 if (reason_valid && !do_bounce) {
7931 do_bounce = ppd->port_error_action &
7932 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7933 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7934 }
7935
7936 /* just report this */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007937 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7938 extra);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007939 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7940 }
7941
7942 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7943 u8 reason_valid = 1;
7944
7945 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7946 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7947 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7948 if (!(dd->err_info_rcvport.status_and_code &
7949 OPA_EI_STATUS_SMASK)) {
7950 dd->err_info_rcvport.status_and_code =
7951 info & OPA_EI_CODE_SMASK;
7952 /* set status bit */
7953 dd->err_info_rcvport.status_and_code |=
7954 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007955 /*
7956 * save first 2 flits in the packet that caused
7957 * the error
7958 */
Bart Van Assche48a0cc132016-06-03 12:09:56 -07007959 dd->err_info_rcvport.packet_flit1 = hdr0;
7960 dd->err_info_rcvport.packet_flit2 = hdr1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007961 }
7962 switch (info) {
7963 case 1:
7964 case 2:
7965 case 3:
7966 case 4:
7967 case 5:
7968 case 6:
7969 case 7:
7970 case 9:
7971 case 11:
7972 case 12:
7973 extra = port_rcv_txt[info];
7974 break;
7975 default:
7976 reason_valid = 0;
7977 snprintf(buf, sizeof(buf), "reserved%lld", info);
7978 extra = buf;
7979 break;
7980 }
7981
7982 if (reason_valid && !do_bounce) {
7983 do_bounce = ppd->port_error_action &
7984 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7985 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7986 }
7987
7988 /* just report this */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007989 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
7990 " hdr0 0x%llx, hdr1 0x%llx\n",
7991 extra, hdr0, hdr1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007992
7993 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7994 }
7995
7996 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7997 /* informative only */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007998 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007999 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8000 }
8001 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8002 /* informative only */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08008003 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008004 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8005 }
8006
Don Hiatt243d9f42017-03-20 17:26:20 -07008007 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8008 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8009
Mike Marciniszyn77241052015-07-30 15:17:43 -04008010 /* report any remaining errors */
8011 if (reg)
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08008012 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8013 dcc_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008014
8015 if (lcl_reason == 0)
8016 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8017
8018 if (do_bounce) {
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08008019 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8020 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008021 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
Sebastian Sanchez71d47002017-07-29 08:43:49 -07008022 queue_work(ppd->link_wq, &ppd->link_bounce_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008023 }
8024}
8025
8026static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8027{
8028 char buf[96];
8029
8030 dd_dev_info(dd, "LCB Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008031 lcb_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008032}
8033
8034/*
8035 * CCE block DC interrupt. Source is < 8.
8036 */
8037static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8038{
8039 const struct err_reg_info *eri = &dc_errs[source];
8040
8041 if (eri->handler) {
8042 interrupt_clear_down(dd, 0, eri);
8043 } else if (source == 3 /* dc_lbm_int */) {
8044 /*
8045 * This indicates that a parity error has occurred on the
8046 * address/control lines presented to the LBM. The error
8047 * is a single pulse, there is no associated error flag,
8048 * and it is non-maskable. This is because if a parity
 8049		 * error occurs on the request, the request is dropped.
8050 * This should never occur, but it is nice to know if it
8051 * ever does.
8052 */
8053 dd_dev_err(dd, "Parity error in DC LBM block\n");
8054 } else {
8055 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8056 }
8057}
8058
8059/*
8060 * TX block send credit interrupt. Source is < 160.
8061 */
8062static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8063{
8064 sc_group_release_update(dd, source);
8065}
8066
8067/*
8068 * TX block SDMA interrupt. Source is < 48.
8069 *
8070 * SDMA interrupts are grouped by type:
8071 *
8072 * 0 - N-1 = SDma
8073 * N - 2N-1 = SDmaProgress
8074 * 2N - 3N-1 = SDmaIdle
8075 */
8076static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8077{
8078 /* what interrupt */
8079 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8080 /* which engine */
8081 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8082
8083#ifdef CONFIG_SDMA_VERBOSITY
8084 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8085 slashstrip(__FILE__), __LINE__, __func__);
8086 sdma_dumpstate(&dd->per_sdma[which]);
8087#endif
8088
8089 if (likely(what < 3 && which < dd->num_sdma)) {
8090 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8091 } else {
8092 /* should not happen */
8093 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8094 }
8095}
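
/*
 * Illustrative sketch (not driver code): the SDMA interrupt source is a
 * flat index covering three interrupt types for TXE_NUM_SDMA_ENGINES
 * engines, so type and engine fall out of a divide/modulo:
 *
 *	what  = source / TXE_NUM_SDMA_ENGINES;	// 0=SDma, 1=Progress, 2=Idle
 *	which = source % TXE_NUM_SDMA_ENGINES;	// engine number
 *
 * e.g. with 16 engines, source 19 is an SDmaProgress interrupt
 * (19 / 16 == 1) for engine 3 (19 % 16 == 3).
 */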
8096
8097/*
8098 * RX block receive available interrupt. Source is < 160.
8099 */
8100static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8101{
8102 struct hfi1_ctxtdata *rcd;
8103 char *err_detail;
8104
8105 if (likely(source < dd->num_rcv_contexts)) {
8106 rcd = dd->rcd[source];
8107 if (rcd) {
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07008108 /* Check for non-user contexts, including vnic */
8109 if ((source < dd->first_dyn_alloc_ctxt) ||
8110 (rcd->sc && (rcd->sc->type == SC_KERNEL)))
Dean Luickf4f30031c2015-10-26 10:28:44 -04008111 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008112 else
8113 handle_user_interrupt(rcd);
8114 return; /* OK */
8115 }
8116 /* received an interrupt, but no rcd */
8117 err_detail = "dataless";
8118 } else {
8119 /* received an interrupt, but are not using that context */
8120 err_detail = "out of range";
8121 }
8122 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008123 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008124}
8125
8126/*
8127 * RX block receive urgent interrupt. Source is < 160.
8128 */
8129static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8130{
8131 struct hfi1_ctxtdata *rcd;
8132 char *err_detail;
8133
8134 if (likely(source < dd->num_rcv_contexts)) {
8135 rcd = dd->rcd[source];
8136 if (rcd) {
8137 /* only pay attention to user urgent interrupts */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07008138 if ((source >= dd->first_dyn_alloc_ctxt) &&
8139 (!rcd->sc || (rcd->sc->type == SC_USER)))
Mike Marciniszyn77241052015-07-30 15:17:43 -04008140 handle_user_interrupt(rcd);
8141 return; /* OK */
8142 }
8143 /* received an interrupt, but no rcd */
8144 err_detail = "dataless";
8145 } else {
8146 /* received an interrupt, but are not using that context */
8147 err_detail = "out of range";
8148 }
8149 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008150 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008151}
8152
8153/*
8154 * Reserved range interrupt. Should not be called in normal operation.
8155 */
8156static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8157{
8158 char name[64];
8159
8160 dd_dev_err(dd, "unexpected %s interrupt\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008161 is_reserved_name(name, sizeof(name), source));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008162}
8163
8164static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08008165/*
8166 * start end
8167 * name func interrupt func
8168 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008169{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8170 is_misc_err_name, is_misc_err_int },
8171{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8172 is_sdma_eng_err_name, is_sdma_eng_err_int },
8173{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8174 is_sendctxt_err_name, is_sendctxt_err_int },
8175{ IS_SDMA_START, IS_SDMA_END,
8176 is_sdma_eng_name, is_sdma_eng_int },
8177{ IS_VARIOUS_START, IS_VARIOUS_END,
8178 is_various_name, is_various_int },
8179{ IS_DC_START, IS_DC_END,
8180 is_dc_name, is_dc_int },
8181{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8182 is_rcv_avail_name, is_rcv_avail_int },
8183{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8184 is_rcv_urgent_name, is_rcv_urgent_int },
8185{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8186 is_send_credit_name, is_send_credit_int},
8187{ IS_RESERVED_START, IS_RESERVED_END,
8188 is_reserved_name, is_reserved_int},
8189};
8190
8191/*
8192 * Interrupt source interrupt - called when the given source has an interrupt.
8193 * Source is a bit index into an array of 64-bit integers.
8194 */
8195static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8196{
8197 const struct is_table *entry;
8198
8199 /* avoids a double compare by walking the table in-order */
8200 for (entry = &is_table[0]; entry->is_name; entry++) {
8201 if (source < entry->end) {
8202 trace_hfi1_interrupt(dd, entry, source);
8203 entry->is_int(dd, source - entry->start);
8204 return;
8205 }
8206 }
8207 /* fell off the end */
8208 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8209}
8210
8211/*
8212 * General interrupt handler. This is able to correctly handle
8213 * all interrupts in case INTx is used.
8214 */
8215static irqreturn_t general_interrupt(int irq, void *data)
8216{
8217 struct hfi1_devdata *dd = data;
8218 u64 regs[CCE_NUM_INT_CSRS];
8219 u32 bit;
8220 int i;
8221
8222 this_cpu_inc(*dd->int_counter);
8223
8224 /* phase 1: scan and clear all handled interrupts */
8225 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8226 if (dd->gi_mask[i] == 0) {
8227 regs[i] = 0; /* used later */
8228 continue;
8229 }
8230 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8231 dd->gi_mask[i];
8232 /* only clear if anything is set */
8233 if (regs[i])
8234 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8235 }
8236
8237 /* phase 2: call the appropriate handler */
8238 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John17fb4f22016-02-14 20:21:52 -08008239 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008240 is_interrupt(dd, bit);
8241 }
8242
8243 return IRQ_HANDLED;
8244}
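
/*
 * Illustrative sketch (not driver code): general_interrupt() uses a
 * read-and-clear snapshot so that new interrupt bits arriving while the
 * snapshot is being serviced raise a fresh interrupt rather than being
 * lost.  Variable names here ("pending", "snapshot") are illustrative:
 *
 *	// phase 1: snapshot enabled status bits, then clear exactly those
 *	pending = read_csr(dd, CCE_INT_STATUS + (8 * i)) & dd->gi_mask[i];
 *	if (pending)
 *		write_csr(dd, CCE_INT_CLEAR + (8 * i), pending);
 *
 *	// phase 2: dispatch each set bit from the snapshot
 *	for_each_set_bit(bit, snapshot, CCE_NUM_INT_CSRS * 64)
 *		is_interrupt(dd, bit);
 */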
8245
8246static irqreturn_t sdma_interrupt(int irq, void *data)
8247{
8248 struct sdma_engine *sde = data;
8249 struct hfi1_devdata *dd = sde->dd;
8250 u64 status;
8251
8252#ifdef CONFIG_SDMA_VERBOSITY
8253 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8254 slashstrip(__FILE__), __LINE__, __func__);
8255 sdma_dumpstate(sde);
8256#endif
8257
8258 this_cpu_inc(*dd->int_counter);
8259
8260 /* This read_csr is really bad in the hot path */
8261 status = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008262 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8263 & sde->imask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008264 if (likely(status)) {
8265 /* clear the interrupt(s) */
8266 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008267 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8268 status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008269
8270 /* handle the interrupt(s) */
8271 sdma_engine_interrupt(sde, status);
Dennis Dalessandroee495ad2017-04-09 10:17:18 -07008272 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008273 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008274 sde->this_idx);
Dennis Dalessandroee495ad2017-04-09 10:17:18 -07008275 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04008276 return IRQ_HANDLED;
8277}
8278
8279/*
Dean Luickecd42f82016-02-03 14:35:14 -08008280 * Clear the receive interrupt. Use a read of the interrupt clear CSR
 8281	 * to ensure that the write completed. This does NOT guarantee that
8282 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008283 */
8284static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8285{
8286 struct hfi1_devdata *dd = rcd->dd;
8287 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8288
8289 mmiowb(); /* make sure everything before is written */
8290 write_csr(dd, addr, rcd->imask);
8291 /* force the above write on the chip and get a value back */
8292 (void)read_csr(dd, addr);
8293}
8294
8295/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008296void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008297{
8298 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8299}
8300
Dean Luickecd42f82016-02-03 14:35:14 -08008301/*
8302 * Return non-zero if a packet is present.
8303 *
8304 * This routine is called when rechecking for packets after the RcvAvail
8305 * interrupt has been cleared down. First, do a quick check of memory for
8306 * a packet present. If not found, use an expensive CSR read of the context
8307 * tail to determine the actual tail. The CSR read is necessary because there
8308 * is no method to push pending DMAs to memory other than an interrupt and we
8309 * are trying to determine if we need to force an interrupt.
8310 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008311static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8312{
Dean Luickecd42f82016-02-03 14:35:14 -08008313 u32 tail;
8314 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008315
Dean Luickecd42f82016-02-03 14:35:14 -08008316 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8317 present = (rcd->seq_cnt ==
8318 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8319 else /* is RDMA rtail */
8320 present = (rcd->head != get_rcvhdrtail(rcd));
8321
8322 if (present)
8323 return 1;
8324
 8325	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8326 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8327 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008328}
8329
8330/*
8331 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8332 * This routine will try to handle packets immediately (latency), but if
 8333	 * it finds too many, it will invoke the thread handler (bandwidth). The
Jubin John16733b82016-02-14 20:20:58 -08008334 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008335 * invoked) is finished. The intent is to avoid extra interrupts while we
8336 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008337 */
8338static irqreturn_t receive_context_interrupt(int irq, void *data)
8339{
8340 struct hfi1_ctxtdata *rcd = data;
8341 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008342 int disposition;
8343 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008344
8345 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8346 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008347 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008348
Dean Luickf4f30031c2015-10-26 10:28:44 -04008349 /* receive interrupt remains blocked while processing packets */
8350 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008351
Dean Luickf4f30031c2015-10-26 10:28:44 -04008352 /*
8353 * Too many packets were seen while processing packets in this
8354 * IRQ handler. Invoke the handler thread. The receive interrupt
8355 * remains blocked.
8356 */
8357 if (disposition == RCV_PKT_LIMIT)
8358 return IRQ_WAKE_THREAD;
8359
8360 /*
8361 * The packet processor detected no more packets. Clear the receive
 8362	 * interrupt and recheck for a packet that may have arrived
8363 * after the previous check and interrupt clear. If a packet arrived,
8364 * force another interrupt.
8365 */
8366 clear_recv_intr(rcd);
8367 present = check_packet_present(rcd);
8368 if (present)
8369 force_recv_intr(rcd);
8370
8371 return IRQ_HANDLED;
8372}
8373
8374/*
8375 * Receive packet thread handler. This expects to be invoked with the
8376 * receive interrupt still blocked.
8377 */
8378static irqreturn_t receive_context_thread(int irq, void *data)
8379{
8380 struct hfi1_ctxtdata *rcd = data;
8381 int present;
8382
8383 /* receive interrupt is still blocked from the IRQ handler */
8384 (void)rcd->do_interrupt(rcd, 1);
8385
8386 /*
8387 * The packet processor will only return if it detected no more
8388 * packets. Hold IRQs here so we can safely clear the interrupt and
8389 * recheck for a packet that may have arrived after the previous
8390 * check and the interrupt clear. If a packet arrived, force another
8391 * interrupt.
8392 */
8393 local_irq_disable();
8394 clear_recv_intr(rcd);
8395 present = check_packet_present(rcd);
8396 if (present)
8397 force_recv_intr(rcd);
8398 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008399
8400 return IRQ_HANDLED;
8401}
8402
8403/* ========================================================================= */
8404
8405u32 read_physical_state(struct hfi1_devdata *dd)
8406{
8407 u64 reg;
8408
8409 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8410 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8411 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8412}
8413
Jim Snowfb9036d2016-01-11 18:32:21 -05008414u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008415{
8416 u64 reg;
8417
8418 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8419 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8420 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8421}
8422
8423static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8424{
8425 u64 reg;
8426
8427 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8428 /* clear current state, set new state */
8429 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8430 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8431 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8432}
8433
8434/*
8435 * Use the 8051 to read a LCB CSR.
8436 */
8437static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8438{
8439 u32 regno;
8440 int ret;
8441
8442 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8443 if (acquire_lcb_access(dd, 0) == 0) {
8444 *data = read_csr(dd, addr);
8445 release_lcb_access(dd, 0);
8446 return 0;
8447 }
8448 return -EBUSY;
8449 }
8450
8451 /* register is an index of LCB registers: (offset - base) / 8 */
8452 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8453 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8454 if (ret != HCMD_SUCCESS)
8455 return -EBUSY;
8456 return 0;
8457}
8458
8459/*
Michael J. Ruhl86884262017-03-20 17:24:51 -07008460 * Provide a cache for some of the LCB registers in case the LCB is
8461 * unavailable.
8462 * (The LCB is unavailable in certain link states, for example.)
8463 */
8464struct lcb_datum {
8465 u32 off;
8466 u64 val;
8467};
8468
8469static struct lcb_datum lcb_cache[] = {
8470 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8471 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8472 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8473};
8474
8475static void update_lcb_cache(struct hfi1_devdata *dd)
8476{
8477 int i;
8478 int ret;
8479 u64 val;
8480
8481 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8482 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8483
8484 /* Update if we get good data */
8485 if (likely(ret != -EBUSY))
8486 lcb_cache[i].val = val;
8487 }
8488}
8489
8490static int read_lcb_cache(u32 off, u64 *val)
8491{
8492 int i;
8493
8494 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8495 if (lcb_cache[i].off == off) {
8496 *val = lcb_cache[i].val;
8497 return 0;
8498 }
8499 }
8500
8501 pr_warn("%s bad offset 0x%x\n", __func__, off);
8502 return -1;
8503}
8504
8505/*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008506 * Read an LCB CSR. Access may not be in host control, so check.
8507 * Return 0 on success, -EBUSY on failure.
8508 */
8509int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8510{
8511 struct hfi1_pportdata *ppd = dd->pport;
8512
8513 /* if up, go through the 8051 for the value */
8514 if (ppd->host_link_state & HLS_UP)
8515 return read_lcb_via_8051(dd, addr, data);
Michael J. Ruhl86884262017-03-20 17:24:51 -07008516 /* if going up or down, check the cache, otherwise, no access */
8517 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8518 if (read_lcb_cache(addr, data))
8519 return -EBUSY;
8520 return 0;
8521 }
8522
Mike Marciniszyn77241052015-07-30 15:17:43 -04008523 /* otherwise, host has access */
8524 *data = read_csr(dd, addr);
8525 return 0;
8526}
8527
8528/*
8529 * Use the 8051 to write a LCB CSR.
8530 */
8531static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8532{
Dean Luick3bf40d62015-11-06 20:07:04 -05008533 u32 regno;
8534 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008535
Dean Luick3bf40d62015-11-06 20:07:04 -05008536 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008537 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
Dean Luick3bf40d62015-11-06 20:07:04 -05008538 if (acquire_lcb_access(dd, 0) == 0) {
8539 write_csr(dd, addr, data);
8540 release_lcb_access(dd, 0);
8541 return 0;
8542 }
8543 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008544 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008545
8546 /* register is an index of LCB registers: (offset - base) / 8 */
8547 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8548 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8549 if (ret != HCMD_SUCCESS)
8550 return -EBUSY;
8551 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008552}
8553
8554/*
8555 * Write an LCB CSR. Access may not be in host control, so check.
8556 * Return 0 on success, -EBUSY on failure.
8557 */
8558int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8559{
8560 struct hfi1_pportdata *ppd = dd->pport;
8561
8562 /* if up, go through the 8051 for the value */
8563 if (ppd->host_link_state & HLS_UP)
8564 return write_lcb_via_8051(dd, addr, data);
8565 /* if going up or down, no access */
8566 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8567 return -EBUSY;
8568 /* otherwise, host has access */
8569 write_csr(dd, addr, data);
8570 return 0;
8571}
8572
8573/*
8574 * Returns:
8575 * < 0 = Linux error, not able to get access
8576 * > 0 = 8051 command RETURN_CODE
8577 */
8578static int do_8051_command(
8579 struct hfi1_devdata *dd,
8580 u32 type,
8581 u64 in_data,
8582 u64 *out_data)
8583{
8584 u64 reg, completed;
8585 int return_code;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008586 unsigned long timeout;
8587
8588 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8589
Tadeusz Struk22546b72017-04-28 10:40:02 -07008590 mutex_lock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008591
8592 /* We can't send any commands to the 8051 if it's in reset */
8593 if (dd->dc_shutdown) {
8594 return_code = -ENODEV;
8595 goto fail;
8596 }
8597
8598 /*
8599 * If an 8051 host command timed out previously, then the 8051 is
8600 * stuck.
8601 *
8602 * On first timeout, attempt to reset and restart the entire DC
8603 * block (including 8051). (Is this too big of a hammer?)
8604 *
8605 * If the 8051 times out a second time, the reset did not bring it
8606 * back to healthy life. In that case, fail any subsequent commands.
8607 */
8608 if (dd->dc8051_timed_out) {
8609 if (dd->dc8051_timed_out > 1) {
8610 dd_dev_err(dd,
8611 "Previous 8051 host command timed out, skipping command %u\n",
8612 type);
8613 return_code = -ENXIO;
8614 goto fail;
8615 }
Tadeusz Struk22546b72017-04-28 10:40:02 -07008616 _dc_shutdown(dd);
8617 _dc_start(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008618 }
8619
8620 /*
8621 * If there is no timeout, then the 8051 command interface is
8622 * waiting for a command.
8623 */
8624
8625 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008626	 * When writing an LCB CSR, out_data contains the full value
 8627	 * to be written, while in_data contains the relative LCB
 8628	 * address in 7:0. Do the work here, rather than the caller,
 8629	 * of distributing the write data to where it needs to go:
8630 *
8631 * Write data
8632 * 39:00 -> in_data[47:8]
8633 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8634 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8635 */
8636 if (type == HCMD_WRITE_LCB_CSR) {
8637 in_data |= ((*out_data) & 0xffffffffffull) << 8;
Dean Luick00801672016-12-07 19:33:40 -08008638 /* must preserve COMPLETED - it is tied to hardware */
8639 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8640 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8641 reg |= ((((*out_data) >> 40) & 0xff) <<
Dean Luick3bf40d62015-11-06 20:07:04 -05008642 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8643 | ((((*out_data) >> 48) & 0xffff) <<
8644 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8645 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8646 }
8647
8648 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008649 * Do two writes: the first to stabilize the type and req_data, the
8650 * second to activate.
8651 */
8652 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8653 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8654 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8655 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8656 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8657 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8658 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8659
8660 /* wait for completion, alternate: interrupt */
8661 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8662 while (1) {
8663 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8664 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8665 if (completed)
8666 break;
8667 if (time_after(jiffies, timeout)) {
8668 dd->dc8051_timed_out++;
8669 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8670 if (out_data)
8671 *out_data = 0;
8672 return_code = -ETIMEDOUT;
8673 goto fail;
8674 }
8675 udelay(2);
8676 }
8677
8678 if (out_data) {
8679 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8680 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8681 if (type == HCMD_READ_LCB_CSR) {
8682 /* top 16 bits are in a different register */
8683 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8684 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8685 << (48
8686 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8687 }
8688 }
8689 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8690 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8691 dd->dc8051_timed_out = 0;
8692 /*
8693 * Clear command for next user.
8694 */
8695 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8696
8697fail:
Tadeusz Struk22546b72017-04-28 10:40:02 -07008698 mutex_unlock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008699 return return_code;
8700}
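
/*
 * Illustrative sketch (not driver code): for HCMD_WRITE_LCB_CSR the 64-bit
 * write value arrives in *out_data and is split across the command data
 * and DC_DC8051_CFG_EXT_DEV_0 exactly as the layout comment inside
 * do_8051_command() describes.  "val" and "ext0" are illustrative names,
 * and the shift names are shortened from the DC_DC8051_CFG_EXT_DEV_0_*
 * macros used above:
 *
 *	in_data |= (val & 0xffffffffffull) << 8;		// bits 39:0
 *	ext0     = ((val >> 40) & 0xff)   << RETURN_CODE_SHIFT	// bits 47:40
 *	         | ((val >> 48) & 0xffff) << RSP_DATA_SHIFT;	// bits 63:48
 *
 * The COMPLETED bit of EXT_DEV_0 is preserved across the update because
 * the hardware ties it to command completion.
 */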
8701
8702static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8703{
8704 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8705}
8706
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008707int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8708 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008709{
8710 u64 data;
8711 int ret;
8712
8713 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8714 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8715 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8716 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8717 if (ret != HCMD_SUCCESS) {
8718 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008719 "load 8051 config: field id %d, lane %d, err %d\n",
8720 (int)field_id, (int)lane_id, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008721 }
8722 return ret;
8723}
8724
8725/*
8726 * Read the 8051 firmware "registers". Use the RAM directly. Always
8727 * set the result, even on error.
8728 * Return 0 on success, -errno on failure
8729 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008730int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8731 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008732{
8733 u64 big_data;
8734 u32 addr;
8735 int ret;
8736
8737 /* address start depends on the lane_id */
8738 if (lane_id < 4)
8739 addr = (4 * NUM_GENERAL_FIELDS)
8740 + (lane_id * 4 * NUM_LANE_FIELDS);
8741 else
8742 addr = 0;
8743 addr += field_id * 4;
8744
8745 /* read is in 8-byte chunks, hardware will truncate the address down */
8746 ret = read_8051_data(dd, addr, 8, &big_data);
8747
8748 if (ret == 0) {
8749 /* extract the 4 bytes we want */
8750 if (addr & 0x4)
8751 *result = (u32)(big_data >> 32);
8752 else
8753 *result = (u32)big_data;
8754 } else {
8755 *result = 0;
8756 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008757 __func__, lane_id, field_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008758 }
8759
8760 return ret;
8761}
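
/*
 * Illustrative sketch (not driver code): the 8051 "registers" read by
 * read_8051_config() are 4-byte fields laid out in RAM as a block of
 * general fields followed by per-lane blocks, while reads are done in
 * 8-byte chunks.  "base", "big" and "val" are illustrative names:
 *
 *	// lane_id >= 4 selects the general block at offset 0
 *	base = (lane_id < 4)
 *	     ? 4 * NUM_GENERAL_FIELDS + lane_id * 4 * NUM_LANE_FIELDS
 *	     : 0;
 *	addr = base + field_id * 4;
 *	read_8051_data(dd, addr, 8, &big);	// hardware truncates to 8-byte
 *	val  = (addr & 0x4) ? (u32)(big >> 32) : (u32)big;
 */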
8762
8763static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8764 u8 continuous)
8765{
8766 u32 frame;
8767
8768 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8769 | power_management << POWER_MANAGEMENT_SHIFT;
8770 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8771 GENERAL_CONFIG, frame);
8772}
8773
8774static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8775 u16 vl15buf, u8 crc_sizes)
8776{
8777 u32 frame;
8778
8779 frame = (u32)vau << VAU_SHIFT
8780 | (u32)z << Z_SHIFT
8781 | (u32)vcu << VCU_SHIFT
8782 | (u32)vl15buf << VL15BUF_SHIFT
8783 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8784 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8785 GENERAL_CONFIG, frame);
8786}
8787
8788static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8789 u8 *flag_bits, u16 *link_widths)
8790{
8791 u32 frame;
8792
8793 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008794 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008795 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8796 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8797 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8798}
8799
8800static int write_vc_local_link_width(struct hfi1_devdata *dd,
8801 u8 misc_bits,
8802 u8 flag_bits,
8803 u16 link_widths)
8804{
8805 u32 frame;
8806
8807 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8808 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8809 | (u32)link_widths << LINK_WIDTH_SHIFT;
8810 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8811 frame);
8812}
8813
8814static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8815 u8 device_rev)
8816{
8817 u32 frame;
8818
8819 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8820 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8821 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8822}
8823
8824static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8825 u8 *device_rev)
8826{
8827 u32 frame;
8828
8829 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8830 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8831 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8832 & REMOTE_DEVICE_REV_MASK;
8833}
8834
Sebastian Sanchez913cc672017-07-29 08:44:01 -07008835int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8836{
8837 u32 frame;
8838 u32 mask;
8839
8840 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8841 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8842 /* Clear, then set field */
8843 frame &= ~mask;
8844 frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8845 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8846 frame);
8847}
8848
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008849void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8850 u8 *ver_patch)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008851{
8852 u32 frame;
8853
8854 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008855 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8856 STS_FM_VERSION_MAJOR_MASK;
8857 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8858 STS_FM_VERSION_MINOR_MASK;
8859
8860 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8861 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8862 STS_FM_VERSION_PATCH_MASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008863}
8864
8865static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8866 u8 *continuous)
8867{
8868 u32 frame;
8869
8870 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8871 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8872 & POWER_MANAGEMENT_MASK;
8873 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8874 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8875}
8876
8877static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8878 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8879{
8880 u32 frame;
8881
8882 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8883 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8884 *z = (frame >> Z_SHIFT) & Z_MASK;
8885 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8886 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8887 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8888}
8889
8890static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8891 u8 *remote_tx_rate,
8892 u16 *link_widths)
8893{
8894 u32 frame;
8895
8896 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008897 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008898 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8899 & REMOTE_TX_RATE_MASK;
8900 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8901}
8902
8903static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8904{
8905 u32 frame;
8906
8907 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8908 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8909}
8910
8911static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8912{
8913 u32 frame;
8914
8915 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8916 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8917}
8918
8919static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8920{
8921 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8922}
8923
8924static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8925{
8926 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8927}
8928
8929void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8930{
8931 u32 frame;
8932 int ret;
8933
8934 *link_quality = 0;
8935 if (dd->pport->host_link_state & HLS_UP) {
8936 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008937 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008938 if (ret == 0)
8939 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8940 & LINK_QUALITY_MASK;
8941 }
8942}
8943
8944static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8945{
8946 u32 frame;
8947
8948 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8949 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8950}
8951
Dean Luickfeb831d2016-04-14 08:31:36 -07008952static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8953{
8954 u32 frame;
8955
8956 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8957 *ldr = (frame & 0xff);
8958}
8959
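/*
 * Read the current TX_SETTINGS frame from the 8051.  Callers use this to
 * do a read-modify-write of the lane enable, polarity inversion, and max
 * rate fields (see set_local_link_attributes()).
 */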
Mike Marciniszyn77241052015-07-30 15:17:43 -04008960static int read_tx_settings(struct hfi1_devdata *dd,
8961 u8 *enable_lane_tx,
8962 u8 *tx_polarity_inversion,
8963 u8 *rx_polarity_inversion,
8964 u8 *max_rate)
8965{
8966 u32 frame;
8967 int ret;
8968
8969 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8970 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8971 & ENABLE_LANE_TX_MASK;
8972 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8973 & TX_POLARITY_INVERSION_MASK;
8974 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8975 & RX_POLARITY_INVERSION_MASK;
8976 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8977 return ret;
8978}
8979
8980static int write_tx_settings(struct hfi1_devdata *dd,
8981 u8 enable_lane_tx,
8982 u8 tx_polarity_inversion,
8983 u8 rx_polarity_inversion,
8984 u8 max_rate)
8985{
8986 u32 frame;
8987
8988 /* no need to mask, all variable sizes match field widths */
8989 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8990 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8991 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8992 | max_rate << MAX_RATE_SHIFT;
8993 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8994}
8995
Mike Marciniszyn77241052015-07-30 15:17:43 -04008996/*
8997 * Read an idle LCB message.
8998 *
8999 * Returns 0 on success, -EINVAL on error
9000 */
9001static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9002{
9003 int ret;
9004
Jubin John17fb4f22016-02-14 20:21:52 -08009005 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009006 if (ret != HCMD_SUCCESS) {
9007 dd_dev_err(dd, "read idle message: type %d, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08009008 (u32)type, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009009 return -EINVAL;
9010 }
9011 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9012 /* return only the payload as we already know the type */
9013 *data_out >>= IDLE_PAYLOAD_SHIFT;
9014 return 0;
9015}
9016
9017/*
9018 * Read an idle SMA message. To be done in response to a notification from
9019 * the 8051.
9020 *
9021 * Returns 0 on success, -EINVAL on error
9022 */
9023static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9024{
Jubin John17fb4f22016-02-14 20:21:52 -08009025 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9026 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009027}
9028
9029/*
9030 * Send an idle LCB message.
9031 *
9032 * Returns 0 on success, -EINVAL on error
9033 */
9034static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9035{
9036 int ret;
9037
9038 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9039 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9040 if (ret != HCMD_SUCCESS) {
9041 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08009042 data, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009043 return -EINVAL;
9044 }
9045 return 0;
9046}
9047
9048/*
9049 * Send an idle SMA message.
9050 *
9051 * Returns 0 on success, -EINVAL on error
9052 */
9053int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9054{
9055 u64 data;
9056
Jubin John17fb4f22016-02-14 20:21:52 -08009057 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9058 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009059 return send_idle_message(dd, data);
9060}
9061
9062/*
9063 * Initialize the LCB then do a quick link up. This may or may not be
9064 * in loopback.
9065 *
9066 * return 0 on success, -errno on error
9067 */
9068static int do_quick_linkup(struct hfi1_devdata *dd)
9069{
Mike Marciniszyn77241052015-07-30 15:17:43 -04009070 int ret;
9071
9072 lcb_shutdown(dd, 0);
9073
9074 if (loopback) {
9075 /* LCB_CFG_LOOPBACK.VAL = 2 */
9076 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9077 write_csr(dd, DC_LCB_CFG_LOOPBACK,
Jubin John17fb4f22016-02-14 20:21:52 -08009078 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009079 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9080 }
9081
9082 /* start the LCBs */
9083 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9084 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9085
9086 /* simulator only loopback steps */
9087 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9088 /* LCB_CFG_RUN.EN = 1 */
9089 write_csr(dd, DC_LCB_CFG_RUN,
Jubin John17fb4f22016-02-14 20:21:52 -08009090 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009091
Dean Luickec8a1422017-03-20 17:24:39 -07009092 ret = wait_link_transfer_active(dd, 10);
9093 if (ret)
9094 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009095
9096 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
Jubin John17fb4f22016-02-14 20:21:52 -08009097 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009098 }
9099
9100 if (!loopback) {
9101 /*
9102 * When doing quick linkup and not in loopback, both
9103 * sides must be done with LCB set-up before either
9104 * starts the quick linkup. Put a delay here so that
9105 * both sides can be started and have a chance to be
9106 * done with LCB set up before resuming.
9107 */
9108 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009109 "Pausing for peer to be finished with LCB set up\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009110 msleep(5000);
Jubin John17fb4f22016-02-14 20:21:52 -08009111 dd_dev_err(dd, "Continuing with quick linkup\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009112 }
9113
9114 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9115 set_8051_lcb_access(dd);
9116
9117 /*
9118 * State "quick" LinkUp request sets the physical link state to
9119 * LinkUp without a verify capability sequence.
9120 * This state is in simulator v37 and later.
9121 */
9122 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9123 if (ret != HCMD_SUCCESS) {
9124 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009125 "%s: set physical link state to quick LinkUp failed with return %d\n",
9126 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009127
9128 set_host_lcb_access(dd);
9129 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9130
9131 if (ret >= 0)
9132 ret = -EINVAL;
9133 return ret;
9134 }
9135
9136 return 0; /* success */
9137}
9138
9139/*
9140 * Set the SerDes to internal loopback mode.
9141 * Returns 0 on success, -errno on error.
9142 */
9143static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9144{
9145 int ret;
9146
9147 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9148 if (ret == HCMD_SUCCESS)
9149 return 0;
9150 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009151 "Set physical link state to SerDes Loopback failed with return %d\n",
9152 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009153 if (ret >= 0)
9154 ret = -EINVAL;
9155 return ret;
9156}
9157
9158/*
9159 * Do all special steps to set up loopback.
9160 */
9161static int init_loopback(struct hfi1_devdata *dd)
9162{
9163 dd_dev_info(dd, "Entering loopback mode\n");
9164
9165 /* all loopbacks should disable self GUID check */
9166 write_csr(dd, DC_DC8051_CFG_MODE,
Jubin John17fb4f22016-02-14 20:21:52 -08009167 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009168
9169 /*
9170 * The simulator has only one loopback option - LCB. Switch
9171 * to that option, which includes quick link up.
9172 *
9173 * Accept all valid loopback values.
9174 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08009175 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9176 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9177 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009178 loopback = LOOPBACK_LCB;
9179 quick_linkup = 1;
9180 return 0;
9181 }
9182
9183 /* handle serdes loopback */
9184 if (loopback == LOOPBACK_SERDES) {
 9185		/* internal serdes loopback needs quick linkup on RTL */
9186 if (dd->icode == ICODE_RTL_SILICON)
9187 quick_linkup = 1;
9188 return set_serdes_loopback_mode(dd);
9189 }
9190
9191 /* LCB loopback - handled at poll time */
9192 if (loopback == LOOPBACK_LCB) {
9193 quick_linkup = 1; /* LCB is always quick linkup */
9194
9195 /* not supported in emulation due to emulation RTL changes */
9196 if (dd->icode == ICODE_FPGA_EMULATION) {
9197 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009198 "LCB loopback not supported in emulation\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009199 return -EINVAL;
9200 }
9201 return 0;
9202 }
9203
9204 /* external cable loopback requires no extra steps */
9205 if (loopback == LOOPBACK_CABLE)
9206 return 0;
9207
9208 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9209 return -EINVAL;
9210}
9211
9212/*
9213 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9214 * used in the Verify Capability link width attribute.
9215 */
9216static u16 opa_to_vc_link_widths(u16 opa_widths)
9217{
9218 int i;
9219 u16 result = 0;
9220
9221 static const struct link_bits {
9222 u16 from;
9223 u16 to;
9224 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08009225 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9226 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9227 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9228 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04009229 };
9230
9231 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9232 if (opa_widths & opa_link_xlate[i].from)
9233 result |= opa_link_xlate[i].to;
9234 }
9235 return result;
9236}
9237
9238/*
9239 * Set link attributes before moving to polling.
9240 */
9241static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9242{
9243 struct hfi1_devdata *dd = ppd->dd;
9244 u8 enable_lane_tx;
9245 u8 tx_polarity_inversion;
9246 u8 rx_polarity_inversion;
9247 int ret;
9248
9249 /* reset our fabric serdes to clear any lingering problems */
9250 fabric_serdes_reset(dd);
9251
9252 /* set the local tx rate - need to read-modify-write */
9253 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009254 &rx_polarity_inversion, &ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009255 if (ret)
9256 goto set_local_link_attributes_fail;
9257
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07009258 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009259 /* set the tx rate to the fastest enabled */
9260 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9261 ppd->local_tx_rate = 1;
9262 else
9263 ppd->local_tx_rate = 0;
9264 } else {
9265 /* set the tx rate to all enabled */
9266 ppd->local_tx_rate = 0;
9267 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9268 ppd->local_tx_rate |= 2;
9269 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9270 ppd->local_tx_rate |= 1;
9271 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04009272
9273 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009274 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009275 rx_polarity_inversion, ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009276 if (ret != HCMD_SUCCESS)
9277 goto set_local_link_attributes_fail;
9278
9279 /*
9280 * DC supports continuous updates.
9281 */
Jubin John17fb4f22016-02-14 20:21:52 -08009282 ret = write_vc_local_phy(dd,
9283 0 /* no power management */,
9284 1 /* continuous updates */);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009285 if (ret != HCMD_SUCCESS)
9286 goto set_local_link_attributes_fail;
9287
9288 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9289 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9290 ppd->port_crc_mode_enabled);
9291 if (ret != HCMD_SUCCESS)
9292 goto set_local_link_attributes_fail;
9293
9294 ret = write_vc_local_link_width(dd, 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009295 opa_to_vc_link_widths(
9296 ppd->link_width_enabled));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009297 if (ret != HCMD_SUCCESS)
9298 goto set_local_link_attributes_fail;
9299
9300 /* let peer know who we are */
9301 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9302 if (ret == HCMD_SUCCESS)
9303 return 0;
9304
9305set_local_link_attributes_fail:
9306 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009307 "Failed to set local link attributes, return 0x%x\n",
9308 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009309 return ret;
9310}
9311
9312/*
Easwar Hariharan623bba22016-04-12 11:25:57 -07009313 * Call this to start the link.
9314 * Do not do anything if the link is disabled.
9315 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009316 */
9317int start_link(struct hfi1_pportdata *ppd)
9318{
Dean Luick0db9dec2016-09-06 04:35:20 -07009319 /*
9320 * Tune the SerDes to a ballpark setting for optimal signal and bit
9321 * error rate. Needs to be done before starting the link.
9322 */
9323 tune_serdes(ppd);
9324
Mike Marciniszyn77241052015-07-30 15:17:43 -04009325 if (!ppd->driver_link_ready) {
9326 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009327 "%s: stopping link start because driver is not ready\n",
9328 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009329 return 0;
9330 }
9331
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07009332 /*
9333 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9334 * pkey table can be configured properly if the HFI unit is connected
 9335	 * to a switch port with MgmtAllowed=NO
9336 */
9337 clear_full_mgmt_pkey(ppd);
9338
Easwar Hariharan623bba22016-04-12 11:25:57 -07009339 return set_link_state(ppd, HLS_DN_POLL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009340}
9341
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009342static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9343{
9344 struct hfi1_devdata *dd = ppd->dd;
9345 u64 mask;
9346 unsigned long timeout;
9347
9348 /*
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009349 * Some QSFP cables have a quirk that asserts the IntN line as a side
9350 * effect of power up on plug-in. We ignore this false positive
9351 * interrupt until the module has finished powering up by waiting for
9352 * a minimum timeout of the module inrush initialization time of
9353 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9354 * module have stabilized.
9355 */
9356 msleep(500);
9357
9358 /*
9359 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009360 */
9361 timeout = jiffies + msecs_to_jiffies(2000);
9362 while (1) {
9363 mask = read_csr(dd, dd->hfi1_id ?
9364 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009365 if (!(mask & QSFP_HFI0_INT_N))
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009366 break;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009367 if (time_after(jiffies, timeout)) {
9368 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9369 __func__);
9370 break;
9371 }
9372 udelay(2);
9373 }
9374}
9375
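/*
 * Enable or disable the IntN interrupt source for this port's QSFP.
 * When enabling, the status register is cleared first so a stale
 * indication does not fire an immediate interrupt.
 */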
9376static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9377{
9378 struct hfi1_devdata *dd = ppd->dd;
9379 u64 mask;
9380
9381 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009382 if (enable) {
9383 /*
9384 * Clear the status register to avoid an immediate interrupt
9385 * when we re-enable the IntN pin
9386 */
9387 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9388 QSFP_HFI0_INT_N);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009389 mask |= (u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009390 } else {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009391 mask &= ~(u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009392 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009393 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9394}
9395
9396void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009397{
9398 struct hfi1_devdata *dd = ppd->dd;
9399 u64 mask, qsfp_mask;
9400
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009401 /* Disable INT_N from triggering QSFP interrupts */
9402 set_qsfp_int_n(ppd, 0);
9403
9404 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009405 mask = (u64)QSFP_HFI0_RESET_N;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009406
9407 qsfp_mask = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009408 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009409 qsfp_mask &= ~mask;
9410 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009411 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009412
9413 udelay(10);
9414
9415 qsfp_mask |= mask;
9416 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009417 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009418
9419 wait_for_qsfp_init(ppd);
9420
9421 /*
9422 * Allow INT_N to trigger the QSFP interrupt to watch
9423 * for alarms and warnings
9424 */
9425 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009426}
9427
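/*
 * Scan the interrupt/status bytes read from the QSFP module and log any
 * alarm or warning conditions.  Temperature conditions are always
 * reported; the remaining conditions are skipped while the link is down.
 */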
9428static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9429 u8 *qsfp_interrupt_status)
9430{
9431 struct hfi1_devdata *dd = ppd->dd;
9432
9433 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009434 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009435 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9436 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009437
9438 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009439 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009440 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9441 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009442
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009443 /*
9444 * The remaining alarms/warnings don't matter if the link is down.
9445 */
9446 if (ppd->host_link_state & HLS_DOWN)
9447 return 0;
9448
Mike Marciniszyn77241052015-07-30 15:17:43 -04009449 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009450 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009451 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9452 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009453
9454 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009455 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009456 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9457 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009458
9459 /* Byte 2 is vendor specific */
9460
9461 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009462 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009463 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9464 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009465
9466 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009467 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009468 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9469 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009470
9471 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009472 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009473 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9474 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009475
9476 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009477 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009478 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9479 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009480
9481 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009482 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009483 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9484 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009485
9486 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009487 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009488 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9489 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009490
9491 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009492 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009493 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9494 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009495
9496 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009497 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009498 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9499 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009500
9501 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009502 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009503 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9504 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009505
9506 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009507 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009508 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9509 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009510
9511 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009512 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009513 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9514 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009515
9516 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009517 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
Jan Sokolowski702265f2017-06-09 15:59:33 -07009518 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9519 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009520
9521 /* Bytes 9-10 and 11-12 are reserved */
9522 /* Bytes 13-15 are vendor specific */
9523
9524 return 0;
9525}
9526
Easwar Hariharan623bba22016-04-12 11:25:57 -07009527/* This routine is only scheduled if the QSFP module present signal is asserted */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009528void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009529{
9530 struct qsfp_data *qd;
9531 struct hfi1_pportdata *ppd;
9532 struct hfi1_devdata *dd;
9533
9534 qd = container_of(work, struct qsfp_data, qsfp_work);
9535 ppd = qd->ppd;
9536 dd = ppd->dd;
9537
9538 /* Sanity check */
9539 if (!qsfp_mod_present(ppd))
9540 return;
9541
Jan Sokolowski96603ed2017-07-29 08:43:26 -07009542 if (ppd->host_link_state == HLS_DN_DISABLE) {
9543 dd_dev_info(ppd->dd,
9544 "%s: stopping link start because link is disabled\n",
9545 __func__);
9546 return;
9547 }
9548
Mike Marciniszyn77241052015-07-30 15:17:43 -04009549 /*
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009550 * Turn DC back on after cable has been re-inserted. Up until
9551 * now, the DC has been in reset to save power.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009552 */
9553 dc_start(dd);
9554
9555 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009556 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009557
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009558 wait_for_qsfp_init(ppd);
9559
9560 /*
9561 * Allow INT_N to trigger the QSFP interrupt to watch
9562 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009563 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009564 set_qsfp_int_n(ppd, 1);
9565
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009566 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009567 }
9568
9569 if (qd->check_interrupt_flags) {
9570 u8 qsfp_interrupt_status[16] = {0,};
9571
Dean Luick765a6fa2016-03-05 08:50:06 -08009572 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9573 &qsfp_interrupt_status[0], 16) != 16) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009574 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009575 "%s: Failed to read status of QSFP module\n",
9576 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009577 } else {
9578 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009579
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009580 handle_qsfp_error_conditions(
9581 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009582 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9583 ppd->qsfp_info.check_interrupt_flags = 0;
9584 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009585 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009586 }
9587 }
9588}
9589
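/*
 * Set up QSFP interrupts for this HFI: mask the QSFP interrupt that
 * belongs to the other HFI, clear any stale module status, account for
 * the active-low IntN/ModPrstN pins, and leave IntN masked for now.
 */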
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009590static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009591{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009592 struct hfi1_pportdata *ppd = dd->pport;
9593 u64 qsfp_mask, cce_int_mask;
9594 const int qsfp1_int_smask = QSFP1_INT % 64;
9595 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009596
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009597 /*
9598 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9599 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9600 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9601 * the index of the appropriate CSR in the CCEIntMask CSR array
9602 */
9603 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9604 (8 * (QSFP1_INT / 64)));
9605 if (dd->hfi1_id) {
9606 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9607 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9608 cce_int_mask);
9609 } else {
9610 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9611 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9612 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009613 }
9614
Mike Marciniszyn77241052015-07-30 15:17:43 -04009615 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9616 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009617 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9618 qsfp_mask);
9619 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9620 qsfp_mask);
9621
9622 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009623
9624 /* Handle active low nature of INT_N and MODPRST_N pins */
9625 if (qsfp_mod_present(ppd))
9626 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9627 write_csr(dd,
9628 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9629 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009630}
9631
Dean Luickbbdeb332015-12-01 15:38:15 -05009632/*
9633 * Do a one-time initialize of the LCB block.
9634 */
9635static void init_lcb(struct hfi1_devdata *dd)
9636{
Dean Luicka59329d2016-02-03 14:32:31 -08009637 /* simulator does not correctly handle LCB cclk loopback, skip */
9638 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9639 return;
9640
Dean Luickbbdeb332015-12-01 15:38:15 -05009641 /* the DC has been reset earlier in the driver load */
9642
9643 /* set LCB for cclk loopback on the port */
9644 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9645 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9646 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9647 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9648 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9649 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9650 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9651}
9652
Dean Luick673b9752016-08-31 07:24:33 -07009653/*
9654 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9655 * on error.
9656 */
9657static int test_qsfp_read(struct hfi1_pportdata *ppd)
9658{
9659 int ret;
9660 u8 status;
9661
Easwar Hariharanfb897ad2017-03-20 17:25:42 -07009662 /*
 9663	 * Report success if this is not a QSFP port or, if it is, the cable
 9664	 * is not present
9665 */
9666 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
Dean Luick673b9752016-08-31 07:24:33 -07009667 return 0;
9668
9669 /* read byte 2, the status byte */
9670 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9671 if (ret < 0)
9672 return ret;
9673 if (ret != 1)
9674 return -EIO;
9675
9676 return 0; /* success */
9677}
9678
9679/*
9680 * Values for QSFP retry.
9681 *
 9682 * Give up after 10s (20 x 500ms).  The overall timeout was arrived at
 9683 * empirically, based on experience with a large cluster.
9684 */
9685#define MAX_QSFP_RETRIES 20
9686#define QSFP_RETRY_WAIT 500 /* msec */
9687
9688/*
9689 * Try a QSFP read. If it fails, schedule a retry for later.
9690 * Called on first link activation after driver load.
9691 */
9692static void try_start_link(struct hfi1_pportdata *ppd)
9693{
9694 if (test_qsfp_read(ppd)) {
9695 /* read failed */
9696 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9697 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9698 return;
9699 }
9700 dd_dev_info(ppd->dd,
9701 "QSFP not responding, waiting and retrying %d\n",
9702 (int)ppd->qsfp_retry_count);
9703 ppd->qsfp_retry_count++;
Sebastian Sanchez71d47002017-07-29 08:43:49 -07009704 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
Dean Luick673b9752016-08-31 07:24:33 -07009705 msecs_to_jiffies(QSFP_RETRY_WAIT));
9706 return;
9707 }
9708 ppd->qsfp_retry_count = 0;
9709
Dean Luick673b9752016-08-31 07:24:33 -07009710 start_link(ppd);
9711}
9712
9713/*
9714 * Workqueue function to start the link after a delay.
9715 */
9716void handle_start_link(struct work_struct *work)
9717{
9718 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9719 start_link_work.work);
9720 try_start_link(ppd);
9721}
9722
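/*
 * Bring up the serdes for this port: set the port GUID if it is not
 * already set, perform the one-time LCB init, configure loopback if
 * requested, wait for the QSFP to initialize on QSFP ports, and then
 * attempt to start the link.
 */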
Mike Marciniszyn77241052015-07-30 15:17:43 -04009723int bringup_serdes(struct hfi1_pportdata *ppd)
9724{
9725 struct hfi1_devdata *dd = ppd->dd;
9726 u64 guid;
9727 int ret;
9728
9729 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9730 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9731
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07009732 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
Mike Marciniszyn77241052015-07-30 15:17:43 -04009733 if (!guid) {
9734 if (dd->base_guid)
9735 guid = dd->base_guid + ppd->port - 1;
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07009736 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009737 }
9738
Mike Marciniszyn77241052015-07-30 15:17:43 -04009739 /* Set linkinit_reason on power up per OPA spec */
9740 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9741
Dean Luickbbdeb332015-12-01 15:38:15 -05009742 /* one-time init of the LCB */
9743 init_lcb(dd);
9744
Mike Marciniszyn77241052015-07-30 15:17:43 -04009745 if (loopback) {
9746 ret = init_loopback(dd);
9747 if (ret < 0)
9748 return ret;
9749 }
9750
Easwar Hariharan9775a992016-05-12 10:22:39 -07009751 get_port_type(ppd);
9752 if (ppd->port_type == PORT_TYPE_QSFP) {
9753 set_qsfp_int_n(ppd, 0);
9754 wait_for_qsfp_init(ppd);
9755 set_qsfp_int_n(ppd, 1);
9756 }
9757
Dean Luick673b9752016-08-31 07:24:33 -07009758 try_start_link(ppd);
9759 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009760}
9761
9762void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9763{
9764 struct hfi1_devdata *dd = ppd->dd;
9765
9766 /*
 9767	 * Shut down the link and keep it down.  First clear the flag that
 9768	 * indicates the driver wants to allow the link up (driver_link_ready).
9769 * Then make sure the link is not automatically restarted
9770 * (link_enabled). Cancel any pending restart. And finally
9771 * go offline.
9772 */
9773 ppd->driver_link_ready = 0;
9774 ppd->link_enabled = 0;
9775
Dean Luick673b9752016-08-31 07:24:33 -07009776 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9777 flush_delayed_work(&ppd->start_link_work);
9778 cancel_delayed_work_sync(&ppd->start_link_work);
9779
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009780 ppd->offline_disabled_reason =
9781 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009782 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009783 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009784 set_link_state(ppd, HLS_DN_OFFLINE);
9785
9786 /* disable the port */
9787 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9788}
9789
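/*
 * Allocate the per-CPU RC ack, qack, and delayed-completion counters for
 * each port.  Returns -ENOMEM if any allocation fails.
 */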
9790static inline int init_cpu_counters(struct hfi1_devdata *dd)
9791{
9792 struct hfi1_pportdata *ppd;
9793 int i;
9794
9795 ppd = (struct hfi1_pportdata *)(dd + 1);
9796 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009797 ppd->ibport_data.rvp.rc_acks = NULL;
9798 ppd->ibport_data.rvp.rc_qacks = NULL;
9799 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9800 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9801 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9802 if (!ppd->ibport_data.rvp.rc_acks ||
9803 !ppd->ibport_data.rvp.rc_delayed_comp ||
9804 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009805 return -ENOMEM;
9806 }
9807
9808 return 0;
9809}
9810
Mike Marciniszyn77241052015-07-30 15:17:43 -04009811/*
9812 * index is the index into the receive array
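 * type is one of the PT_* receive entry types; PT_INVALID and
 * PT_INVALID_FLUSH zero the address and size, clearing the entry.
 * pa is the physical address of the receive buffer and order encodes
 * the buffer size written to the entry's BUF_SIZE field.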
9813 */
9814void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9815 u32 type, unsigned long pa, u16 order)
9816{
9817 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009818
9819 if (!(dd->flags & HFI1_PRESENT))
9820 goto done;
9821
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07009822 if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009823 pa = 0;
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07009824 order = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009825 } else if (type > PT_INVALID) {
9826 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009827 "unexpected receive array type %u for index %u, not handled\n",
9828 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009829 goto done;
9830 }
Mike Marciniszyn8cb10212017-06-09 15:59:59 -07009831 trace_hfi1_put_tid(dd, index, type, pa, order);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009832
9833#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9834 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9835 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9836 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9837 << RCV_ARRAY_RT_ADDR_SHIFT;
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07009838 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9839 writeq(reg, dd->rcvarray_wc + (index * 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009840
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07009841 if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009842 /*
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -07009843 * Eager entries are written and flushed
9844 *
9845 * Expected entries are flushed every 4 writes
Mike Marciniszyn77241052015-07-30 15:17:43 -04009846 */
9847 flush_wc();
9848done:
9849 return;
9850}
9851
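/* Invalidate all eager and expected receive array entries for a context */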
9852void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9853{
9854 struct hfi1_devdata *dd = rcd->dd;
9855 u32 i;
9856
9857 /* this could be optimized */
9858 for (i = rcd->eager_base; i < rcd->eager_base +
9859 rcd->egrbufs.alloced; i++)
9860 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9861
9862 for (i = rcd->expected_base;
9863 i < rcd->expected_base + rcd->expected_count; i++)
9864 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9865}
9866
Mike Marciniszyn77241052015-07-30 15:17:43 -04009867static const char * const ib_cfg_name_strings[] = {
9868 "HFI1_IB_CFG_LIDLMC",
9869 "HFI1_IB_CFG_LWID_DG_ENB",
9870 "HFI1_IB_CFG_LWID_ENB",
9871 "HFI1_IB_CFG_LWID",
9872 "HFI1_IB_CFG_SPD_ENB",
9873 "HFI1_IB_CFG_SPD",
9874 "HFI1_IB_CFG_RXPOL_ENB",
9875 "HFI1_IB_CFG_LREV_ENB",
9876 "HFI1_IB_CFG_LINKLATENCY",
9877 "HFI1_IB_CFG_HRTBT",
9878 "HFI1_IB_CFG_OP_VLS",
9879 "HFI1_IB_CFG_VL_HIGH_CAP",
9880 "HFI1_IB_CFG_VL_LOW_CAP",
9881 "HFI1_IB_CFG_OVERRUN_THRESH",
9882 "HFI1_IB_CFG_PHYERR_THRESH",
9883 "HFI1_IB_CFG_LINKDEFAULT",
9884 "HFI1_IB_CFG_PKEYS",
9885 "HFI1_IB_CFG_MTU",
9886 "HFI1_IB_CFG_LSTATE",
9887 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9888 "HFI1_IB_CFG_PMA_TICKS",
9889 "HFI1_IB_CFG_PORT"
9890};
9891
9892static const char *ib_cfg_name(int which)
9893{
9894 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9895 return "invalid";
9896 return ib_cfg_name_strings[which];
9897}
9898
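/*
 * Return the current value of the given HFI1_IB_CFG_* item for this port.
 * Items that are not implemented return 0 and are logged when the
 * PRINT_UNIMPL capability is set.
 */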
9899int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9900{
9901 struct hfi1_devdata *dd = ppd->dd;
9902 int val = 0;
9903
9904 switch (which) {
9905 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9906 val = ppd->link_width_enabled;
9907 break;
9908 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9909 val = ppd->link_width_active;
9910 break;
9911 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9912 val = ppd->link_speed_enabled;
9913 break;
9914 case HFI1_IB_CFG_SPD: /* current Link speed */
9915 val = ppd->link_speed_active;
9916 break;
9917
9918 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9919 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9920 case HFI1_IB_CFG_LINKLATENCY:
9921 goto unimplemented;
9922
9923 case HFI1_IB_CFG_OP_VLS:
9924 val = ppd->vls_operational;
9925 break;
9926 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9927 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9928 break;
9929 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9930 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9931 break;
9932 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9933 val = ppd->overrun_threshold;
9934 break;
9935 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9936 val = ppd->phy_error_threshold;
9937 break;
9938 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9939 val = dd->link_default;
9940 break;
9941
9942 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9943 case HFI1_IB_CFG_PMA_TICKS:
9944 default:
9945unimplemented:
9946 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9947 dd_dev_info(
9948 dd,
9949 "%s: which %s: not implemented\n",
9950 __func__,
9951 ib_cfg_name(which));
9952 break;
9953 }
9954
9955 return val;
9956}
9957
9958/*
9959 * The largest MAD packet size.
9960 */
9961#define MAX_MAD_PACKET 2048
9962
9963/*
9964 * Return the maximum header bytes that can go on the _wire_
9965 * for this device. This count includes the ICRC which is
9966 * not part of the packet held in memory but it is appended
9967 * by the HW.
9968 * This is dependent on the device's receive header entry size.
9969 * HFI allows this to be set per-receive context, but the
9970 * driver presently enforces a global value.
9971 */
9972u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9973{
9974 /*
9975 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9976 * the Receive Header Entry Size minus the PBC (or RHF) size
9977 * plus one DW for the ICRC appended by HW.
9978 *
 9979 * dd->rcd[0]->rcvhdrqentsize is in DW.
9980 * We use rcd[0] as all context will have the same value. Also,
9981 * the first kernel context would have been allocated by now so
9982 * we are guaranteed a valid value.
9983 */
9984 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9985}
9986
9987/*
9988 * Set Send Length
9989 * @ppd - per port data
9990 *
9991 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9992 * registers compare against LRH.PktLen, so use the max bytes included
9993 * in the LRH.
9994 *
9995 * This routine changes all VL values except VL15, which it maintains at
9996 * the same value.
9997 */
9998static void set_send_length(struct hfi1_pportdata *ppd)
9999{
10000 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -050010001 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10002 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010003 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10004 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10005 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
Jubin Johnb4ba6632016-06-09 07:51:08 -070010006 int i, j;
Jianxin Xiong44306f12016-04-12 11:30:28 -070010007 u32 thres;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010008
10009 for (i = 0; i < ppd->vls_supported; i++) {
10010 if (dd->vld[i].mtu > maxvlmtu)
10011 maxvlmtu = dd->vld[i].mtu;
10012 if (i <= 3)
10013 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10014 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10015 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10016 else
10017 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10018 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10019 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10020 }
10021 write_csr(dd, SEND_LEN_CHECK0, len1);
10022 write_csr(dd, SEND_LEN_CHECK1, len2);
10023 /* adjust kernel credit return thresholds based on new MTUs */
10024 /* all kernel receive contexts have the same hdrqentsize */
10025 for (i = 0; i < ppd->vls_supported; i++) {
Jianxin Xiong44306f12016-04-12 11:30:28 -070010026 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10027 sc_mtu_to_threshold(dd->vld[i].sc,
10028 dd->vld[i].mtu,
Jubin John17fb4f22016-02-14 20:21:52 -080010029 dd->rcd[0]->rcvhdrqentsize));
Jubin Johnb4ba6632016-06-09 07:51:08 -070010030 for (j = 0; j < INIT_SC_PER_VL; j++)
10031 sc_set_cr_threshold(
10032 pio_select_send_context_vl(dd, j, i),
10033 thres);
Jianxin Xiong44306f12016-04-12 11:30:28 -070010034 }
10035 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10036 sc_mtu_to_threshold(dd->vld[15].sc,
10037 dd->vld[15].mtu,
10038 dd->rcd[0]->rcvhdrqentsize));
10039 sc_set_cr_threshold(dd->vld[15].sc, thres);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010040
10041 /* Adjust maximum MTU for the port in DC */
10042 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10043 (ilog2(maxvlmtu >> 8) + 1);
10044 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10045 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10046 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10047 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10048 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10049}
10050
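/*
 * Program the port's LID and LMC into the DC port config and into the
 * SLID check of every send context and SDMA engine.
 */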
10051static void set_lidlmc(struct hfi1_pportdata *ppd)
10052{
10053 int i;
10054 u64 sreg = 0;
10055 struct hfi1_devdata *dd = ppd->dd;
10056 u32 mask = ~((1U << ppd->lmc) - 1);
10057 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10058
Mike Marciniszyn77241052015-07-30 15:17:43 -040010059 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10060 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10061 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -080010062 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -040010063 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10064 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10065 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10066
10067 /*
10068 * Iterate over all the send contexts and set their SLID check
10069 */
10070 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10071 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10072 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10073 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10074
10075 for (i = 0; i < dd->chip_send_contexts; i++) {
10076 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10077 i, (u32)sreg);
10078 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10079 }
10080
10081 /* Now we have to do the same thing for the sdma engines */
10082 sdma_update_lmc(dd, mask, ppd->lid);
10083}
10084
Dean Luick6854c692016-07-25 13:38:56 -070010085static const char *state_completed_string(u32 completed)
10086{
10087 static const char * const state_completed[] = {
10088 "EstablishComm",
10089 "OptimizeEQ",
10090 "VerifyCap"
10091 };
10092
10093 if (completed < ARRAY_SIZE(state_completed))
10094 return state_completed[completed];
10095
10096 return "unknown";
10097}
10098
10099static const char all_lanes_dead_timeout_expired[] =
10100 "All lanes were inactive – was the interconnect media removed?";
10101static const char tx_out_of_policy[] =
10102 "Passing lanes on local port do not meet the local link width policy";
10103static const char no_state_complete[] =
10104 "State timeout occurred before link partner completed the state";
10105static const char * const state_complete_reasons[] = {
10106 [0x00] = "Reason unknown",
10107 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10108 [0x02] = "Link partner reported failure",
10109 [0x10] = "Unable to achieve frame sync on any lane",
10110 [0x11] =
10111 "Unable to find a common bit rate with the link partner",
10112 [0x12] =
10113 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10114 [0x13] =
10115 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10116 [0x14] = no_state_complete,
10117 [0x15] =
10118 "State timeout occurred before link partner identified equalization presets",
10119 [0x16] =
10120 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10121 [0x17] = tx_out_of_policy,
10122 [0x20] = all_lanes_dead_timeout_expired,
10123 [0x21] =
10124 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10125 [0x22] = no_state_complete,
10126 [0x23] =
10127 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10128 [0x24] = tx_out_of_policy,
10129 [0x30] = all_lanes_dead_timeout_expired,
10130 [0x31] =
10131 "State timeout occurred waiting for host to process received frames",
10132 [0x32] = no_state_complete,
10133 [0x33] =
10134 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10135 [0x34] = tx_out_of_policy,
10136};
10137
10138static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10139 u32 code)
10140{
10141 const char *str = NULL;
10142
10143 if (code < ARRAY_SIZE(state_complete_reasons))
10144 str = state_complete_reasons[code];
10145
10146 if (str)
10147 return str;
10148 return "Reserved";
10149}
10150
10151/* describe the given last state complete frame */
10152static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10153 const char *prefix)
10154{
10155 struct hfi1_devdata *dd = ppd->dd;
10156 u32 success;
10157 u32 state;
10158 u32 reason;
10159 u32 lanes;
10160
10161 /*
10162 * Decode frame:
10163 * [ 0: 0] - success
10164 * [ 3: 1] - state
10165 * [ 7: 4] - next state timeout
10166 * [15: 8] - reason code
10167 * [31:16] - lanes
10168 */
10169 success = frame & 0x1;
10170 state = (frame >> 1) & 0x7;
10171 reason = (frame >> 8) & 0xff;
10172 lanes = (frame >> 16) & 0xffff;
10173
10174 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10175 prefix, frame);
 10176	dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10177 state_completed_string(state), state);
10178 dd_dev_err(dd, " state successfully completed: %s\n",
10179 success ? "yes" : "no");
10180 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10181 reason, state_complete_reason_code_string(ppd, reason));
10182 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
10183}
10184
10185/*
10186 * Read the last state complete frames and explain them. This routine
10187 * expects to be called if the link went down during link negotiation
10188 * and initialization (LNI). That is, anywhere between polling and link up.
10189 */
10190static void check_lni_states(struct hfi1_pportdata *ppd)
10191{
10192 u32 last_local_state;
10193 u32 last_remote_state;
10194
10195 read_last_local_state(ppd->dd, &last_local_state);
10196 read_last_remote_state(ppd->dd, &last_remote_state);
10197
10198 /*
10199 * Don't report anything if there is nothing to report. A value of
10200 * 0 means the link was taken down while polling and there was no
10201 * training in-process.
10202 */
10203 if (last_local_state == 0 && last_remote_state == 0)
10204 return;
10205
10206 decode_state_complete(ppd, last_local_state, "transmitted");
10207 decode_state_complete(ppd, last_remote_state, "received");
10208}
10209
Dean Luickec8a1422017-03-20 17:24:39 -070010210/* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10211static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10212{
10213 u64 reg;
10214 unsigned long timeout;
10215
10216 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10217 timeout = jiffies + msecs_to_jiffies(wait_ms);
10218 while (1) {
10219 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10220 if (reg)
10221 break;
10222 if (time_after(jiffies, timeout)) {
10223 dd_dev_err(dd,
10224 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10225 return -ETIMEDOUT;
10226 }
10227 udelay(2);
10228 }
10229 return 0;
10230}
10231
10232/* called when the logical link state is not down as it should be */
10233static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10234{
10235 struct hfi1_devdata *dd = ppd->dd;
10236
10237 /*
10238 * Bring link up in LCB loopback
10239 */
10240 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10241 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10242 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10243
10244 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10245 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10246 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10247 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10248
10249 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10250 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10251 udelay(3);
10252 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10253 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10254
10255 wait_link_transfer_active(dd, 100);
10256
10257 /*
10258 * Bring the link down again.
10259 */
10260 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10261 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10262 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10263
10264 /* call again to adjust ppd->statusp, if needed */
10265 get_logical_state(ppd);
10266}
10267
Mike Marciniszyn77241052015-07-30 15:17:43 -040010268/*
10269 * Helper for set_link_state(). Do not call except from that routine.
10270 * Expects ppd->hls_mutex to be held.
10271 *
10272 * @rem_reason value to be sent to the neighbor
10273 *
10274 * LinkDownReasons only set if transition succeeds.
10275 */
10276static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10277{
10278 struct hfi1_devdata *dd = ppd->dd;
Sebastian Sanchez913cc672017-07-29 08:44:01 -070010279 u32 previous_state;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010280 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010281
Michael J. Ruhl86884262017-03-20 17:24:51 -070010282 update_lcb_cache(dd);
10283
Mike Marciniszyn77241052015-07-30 15:17:43 -040010284 previous_state = ppd->host_link_state;
10285 ppd->host_link_state = HLS_GOING_OFFLINE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010286
Sebastian Sanchez913cc672017-07-29 08:44:01 -070010287 /* start offline transition */
10288 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010289
Sebastian Sanchez913cc672017-07-29 08:44:01 -070010290 if (ret != HCMD_SUCCESS) {
10291 dd_dev_err(dd,
10292 "Failed to transition to Offline link state, return %d\n",
10293 ret);
10294 return -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010295 }
Sebastian Sanchez913cc672017-07-29 08:44:01 -070010296 if (ppd->offline_disabled_reason ==
10297 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10298 ppd->offline_disabled_reason =
10299 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010300
Sebastian Sanchez913cc672017-07-29 08:44:01 -070010301 /*
10302 * Wait for offline transition. It can take a while for
10303 * the link to go down.
10304 */
10305 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
10306 if (ret < 0)
10307 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010308
Mike Marciniszyn77241052015-07-30 15:17:43 -040010309 /*
10310 * Now in charge of LCB - must be after the physical state is
10311 * offline.quiet and before host_link_state is changed.
10312 */
10313 set_host_lcb_access(dd);
10314 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
Dean Luickec8a1422017-03-20 17:24:39 -070010315
10316 /* make sure the logical state is also down */
10317 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10318 if (ret)
10319 force_logical_link_state_down(ppd);
10320
Mike Marciniszyn77241052015-07-30 15:17:43 -040010321 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10322
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010323 if (ppd->port_type == PORT_TYPE_QSFP &&
10324 ppd->qsfp_info.limiting_active &&
10325 qsfp_mod_present(ppd)) {
Dean Luick765a6fa2016-03-05 08:50:06 -080010326 int ret;
10327
10328 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10329 if (ret == 0) {
10330 set_qsfp_tx(ppd, 0);
10331 release_chip_resource(dd, qsfp_resource(dd));
10332 } else {
10333 /* not fatal, but should warn */
10334 dd_dev_err(dd,
10335 "Unable to acquire lock to turn off QSFP TX\n");
10336 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010337 }
10338
Mike Marciniszyn77241052015-07-30 15:17:43 -040010339 /*
10340 * The LNI has a mandatory wait time after the physical state
10341 * moves to Offline.Quiet. The wait time may be different
10342 * depending on how the link went down. The 8051 firmware
10343 * will observe the needed wait time and only move to ready
10344 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -050010345 * is 6s, so wait that long and then at least 0.5s more for
10346 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -040010347 */
Dean Luick05087f3b2015-12-01 15:38:16 -050010348 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010349 if (ret) {
10350 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010351 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010352 /* state is really offline, so make it so */
10353 ppd->host_link_state = HLS_DN_OFFLINE;
10354 return ret;
10355 }
10356
10357 /*
10358 * The state is now offline and the 8051 is ready to accept host
10359 * requests.
10360 * - change our state
10361 * - notify others if we were previously in a linkup state
10362 */
10363 ppd->host_link_state = HLS_DN_OFFLINE;
10364 if (previous_state & HLS_UP) {
10365 /* went down while link was up */
10366 handle_linkup_change(dd, 0);
10367 } else if (previous_state
10368 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10369 /* went down while attempting link up */
Dean Luick6854c692016-07-25 13:38:56 -070010370 check_lni_states(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010371 }
10372
10373 /* the active link width (downgrade) is 0 on link down */
10374 ppd->link_width_active = 0;
10375 ppd->link_width_downgrade_tx_active = 0;
10376 ppd->link_width_downgrade_rx_active = 0;
10377 ppd->current_egress_rate = 0;
10378 return 0;
10379}
10380
10381/* return the link state name */
10382static const char *link_state_name(u32 state)
10383{
10384 const char *name;
10385 int n = ilog2(state);
10386 static const char * const names[] = {
10387 [__HLS_UP_INIT_BP] = "INIT",
10388 [__HLS_UP_ARMED_BP] = "ARMED",
10389 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10390 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10391 [__HLS_DN_POLL_BP] = "POLL",
10392 [__HLS_DN_DISABLE_BP] = "DISABLE",
10393 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10394 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10395 [__HLS_GOING_UP_BP] = "GOING_UP",
10396 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10397 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10398 };
10399
10400 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10401 return name ? name : "unknown";
10402}
10403
10404/* return the link state reason name */
10405static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10406{
10407 if (state == HLS_UP_INIT) {
10408 switch (ppd->linkinit_reason) {
10409 case OPA_LINKINIT_REASON_LINKUP:
10410 return "(LINKUP)";
10411 case OPA_LINKINIT_REASON_FLAPPING:
10412 return "(FLAPPING)";
10413 case OPA_LINKINIT_OUTSIDE_POLICY:
10414 return "(OUTSIDE_POLICY)";
10415 case OPA_LINKINIT_QUARANTINED:
10416 return "(QUARANTINED)";
10417 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10418 return "(INSUFIC_CAPABILITY)";
10419 default:
10420 break;
10421 }
10422 }
10423 return "";
10424}
10425
10426/*
10427 * driver_physical_state - convert the driver's notion of a port's
10428 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10429 * Return -1 (converted to a u32) to indicate error.
10430 */
10431u32 driver_physical_state(struct hfi1_pportdata *ppd)
10432{
10433 switch (ppd->host_link_state) {
10434 case HLS_UP_INIT:
10435 case HLS_UP_ARMED:
10436 case HLS_UP_ACTIVE:
10437 return IB_PORTPHYSSTATE_LINKUP;
10438 case HLS_DN_POLL:
10439 return IB_PORTPHYSSTATE_POLLING;
10440 case HLS_DN_DISABLE:
10441 return IB_PORTPHYSSTATE_DISABLED;
10442 case HLS_DN_OFFLINE:
10443 return OPA_PORTPHYSSTATE_OFFLINE;
10444 case HLS_VERIFY_CAP:
10445 return IB_PORTPHYSSTATE_POLLING;
10446 case HLS_GOING_UP:
10447 return IB_PORTPHYSSTATE_POLLING;
10448 case HLS_GOING_OFFLINE:
10449 return OPA_PORTPHYSSTATE_OFFLINE;
10450 case HLS_LINK_COOLDOWN:
10451 return OPA_PORTPHYSSTATE_OFFLINE;
10452 case HLS_DN_DOWNDEF:
10453 default:
10454 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10455 ppd->host_link_state);
10456 return -1;
10457 }
10458}
10459
10460/*
10461 * driver_logical_state - convert the driver's notion of a port's
10462 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10463 * (converted to a u32) to indicate error.
10464 */
10465u32 driver_logical_state(struct hfi1_pportdata *ppd)
10466{
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -070010467 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010468 return IB_PORT_DOWN;
10469
10470 switch (ppd->host_link_state & HLS_UP) {
10471 case HLS_UP_INIT:
10472 return IB_PORT_INIT;
10473 case HLS_UP_ARMED:
10474 return IB_PORT_ARMED;
10475 case HLS_UP_ACTIVE:
10476 return IB_PORT_ACTIVE;
10477 default:
10478 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10479 ppd->host_link_state);
10480 return -1;
10481 }
10482}
10483
10484void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10485 u8 neigh_reason, u8 rem_reason)
10486{
10487 if (ppd->local_link_down_reason.latest == 0 &&
10488 ppd->neigh_link_down_reason.latest == 0) {
10489 ppd->local_link_down_reason.latest = lcl_reason;
10490 ppd->neigh_link_down_reason.latest = neigh_reason;
10491 ppd->remote_link_down_reason = rem_reason;
10492 }
10493}
10494
10495/*
Alex Estrin5e2d6762017-07-24 07:46:36 -070010496 * Verify that the BCT (buffer control table) for data VLs is non-zero.
10497 */
10498static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10499{
10500 return !!ppd->actual_vls_operational;
10501}
10502
10503/*
Mike Marciniszyn77241052015-07-30 15:17:43 -040010504 * Change the physical and/or logical link state.
10505 *
10506 * Do not call this routine while inside an interrupt. It contains
10507 * calls to routines that can take multiple seconds to finish.
10508 *
10509 * Returns 0 on success, -errno on failure.
10510 */
10511int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10512{
10513 struct hfi1_devdata *dd = ppd->dd;
10514 struct ib_event event = {.device = NULL};
10515 int ret1, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010516 int orig_new_state, poll_bounce;
10517
10518 mutex_lock(&ppd->hls_lock);
10519
10520 orig_new_state = state;
10521 if (state == HLS_DN_DOWNDEF)
10522 state = dd->link_default;
10523
10524 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010525 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10526 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010527
10528 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010529 link_state_name(ppd->host_link_state),
10530 link_state_name(orig_new_state),
10531 poll_bounce ? "(bounce) " : "",
10532 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010533
Mike Marciniszyn77241052015-07-30 15:17:43 -040010534 /*
10535 * If we're going to a (HLS_*) link state that implies the logical
10536 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10537 * reset is_sm_config_started to 0.
10538 */
10539 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10540 ppd->is_sm_config_started = 0;
10541
10542 /*
10543 * Do nothing if the states match. Let a poll to poll link bounce
10544 * go through.
10545 */
10546 if (ppd->host_link_state == state && !poll_bounce)
10547 goto done;
10548
10549 switch (state) {
10550 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010551 if (ppd->host_link_state == HLS_DN_POLL &&
10552 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010553 /*
10554 * Quick link up jumps from polling to here.
10555 *
10556 * Whether in normal or loopback mode, the
10557 * simulator jumps from polling to link up.
10558 * Accept that here.
10559 */
Jubin John17fb4f22016-02-14 20:21:52 -080010560 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010561 } else if (ppd->host_link_state != HLS_GOING_UP) {
10562 goto unexpected;
10563 }
10564
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010565 /*
10566 * Wait for Link_Up physical state.
10567 * Physical and Logical states should already be
10568	 * transitioned to LinkUp and LinkInit, respectively.
10569 */
10570 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10571 if (ret) {
10572 dd_dev_err(dd,
10573 "%s: physical state did not change to LINK-UP\n",
10574 __func__);
10575 break;
10576 }
10577
Mike Marciniszyn77241052015-07-30 15:17:43 -040010578 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10579 if (ret) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010580 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010581 "%s: logical state did not change to INIT\n",
10582 __func__);
Jan Sokolowski59ec8732017-07-24 07:46:18 -070010583 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010584 }
Jan Sokolowski59ec8732017-07-24 07:46:18 -070010585
10586 /* clear old transient LINKINIT_REASON code */
10587 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10588 ppd->linkinit_reason =
10589 OPA_LINKINIT_REASON_LINKUP;
10590
10591 /* enable the port */
10592 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10593
10594 handle_linkup_change(dd, 1);
10595 ppd->host_link_state = HLS_UP_INIT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010596 break;
10597 case HLS_UP_ARMED:
10598 if (ppd->host_link_state != HLS_UP_INIT)
10599 goto unexpected;
10600
Alex Estrin5e2d6762017-07-24 07:46:36 -070010601 if (!data_vls_operational(ppd)) {
10602 dd_dev_err(dd,
10603 "%s: data VLs not operational\n", __func__);
10604 ret = -EINVAL;
10605 break;
10606 }
10607
Mike Marciniszyn77241052015-07-30 15:17:43 -040010608 set_logical_state(dd, LSTATE_ARMED);
10609 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10610 if (ret) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010611 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010612 "%s: logical state did not change to ARMED\n",
10613 __func__);
Alex Estrin5efd40c2017-07-29 08:43:20 -070010614 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010615 }
Alex Estrin5efd40c2017-07-29 08:43:20 -070010616 ppd->host_link_state = HLS_UP_ARMED;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010617 /*
10618 * The simulator does not currently implement SMA messages,
10619 * so neighbor_normal is not set. Set it here when we first
10620 * move to Armed.
10621 */
10622 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10623 ppd->neighbor_normal = 1;
10624 break;
10625 case HLS_UP_ACTIVE:
10626 if (ppd->host_link_state != HLS_UP_ARMED)
10627 goto unexpected;
10628
Mike Marciniszyn77241052015-07-30 15:17:43 -040010629 set_logical_state(dd, LSTATE_ACTIVE);
10630 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10631 if (ret) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010632 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010633 "%s: logical state did not change to ACTIVE\n",
10634 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010635 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010636 /* tell all engines to go running */
10637 sdma_all_running(dd);
Alex Estrin5efd40c2017-07-29 08:43:20 -070010638 ppd->host_link_state = HLS_UP_ACTIVE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010639
10640			/* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010641 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010642 event.element.port_num = ppd->port;
10643 event.event = IB_EVENT_PORT_ACTIVE;
10644 }
10645 break;
10646 case HLS_DN_POLL:
10647 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10648 ppd->host_link_state == HLS_DN_OFFLINE) &&
10649 dd->dc_shutdown)
10650 dc_start(dd);
10651 /* Hand LED control to the DC */
10652 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10653
10654 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10655 u8 tmp = ppd->link_enabled;
10656
10657 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10658 if (ret) {
10659 ppd->link_enabled = tmp;
10660 break;
10661 }
10662 ppd->remote_link_down_reason = 0;
10663
10664 if (ppd->driver_link_ready)
10665 ppd->link_enabled = 1;
10666 }
10667
Jim Snowfb9036d2016-01-11 18:32:21 -050010668 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010669 ret = set_local_link_attributes(ppd);
10670 if (ret)
10671 break;
10672
10673 ppd->port_error_action = 0;
10674 ppd->host_link_state = HLS_DN_POLL;
10675
10676 if (quick_linkup) {
10677 /* quick linkup does not go into polling */
10678 ret = do_quick_linkup(dd);
10679 } else {
10680 ret1 = set_physical_link_state(dd, PLS_POLLING);
10681 if (ret1 != HCMD_SUCCESS) {
10682 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010683 "Failed to transition to Polling link state, return 0x%x\n",
10684 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010685 ret = -EINVAL;
10686 }
10687 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010688 ppd->offline_disabled_reason =
10689 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010690 /*
10691 * If an error occurred above, go back to offline. The
10692 * caller may reschedule another attempt.
10693 */
10694 if (ret)
10695 goto_offline(ppd, 0);
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010696 else
10697 cache_physical_state(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010698 break;
10699 case HLS_DN_DISABLE:
10700 /* link is disabled */
10701 ppd->link_enabled = 0;
10702
10703 /* allow any state to transition to disabled */
10704
10705 /* must transition to offline first */
10706 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10707 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10708 if (ret)
10709 break;
10710 ppd->remote_link_down_reason = 0;
10711 }
10712
Michael J. Ruhldb069ec2017-02-08 05:28:13 -080010713 if (!dd->dc_shutdown) {
10714 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10715 if (ret1 != HCMD_SUCCESS) {
10716 dd_dev_err(dd,
10717 "Failed to transition to Disabled link state, return 0x%x\n",
10718 ret1);
10719 ret = -EINVAL;
10720 break;
10721 }
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010722 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10723 if (ret) {
10724 dd_dev_err(dd,
10725 "%s: physical state did not change to DISABLED\n",
10726 __func__);
10727 break;
10728 }
Michael J. Ruhldb069ec2017-02-08 05:28:13 -080010729 dc_shutdown(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010730 }
10731 ppd->host_link_state = HLS_DN_DISABLE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010732 break;
10733 case HLS_DN_OFFLINE:
10734 if (ppd->host_link_state == HLS_DN_DISABLE)
10735 dc_start(dd);
10736
10737 /* allow any state to transition to offline */
10738 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10739 if (!ret)
10740 ppd->remote_link_down_reason = 0;
10741 break;
10742 case HLS_VERIFY_CAP:
10743 if (ppd->host_link_state != HLS_DN_POLL)
10744 goto unexpected;
10745 ppd->host_link_state = HLS_VERIFY_CAP;
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010746 cache_physical_state(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010747 break;
10748 case HLS_GOING_UP:
10749 if (ppd->host_link_state != HLS_VERIFY_CAP)
10750 goto unexpected;
10751
10752 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10753 if (ret1 != HCMD_SUCCESS) {
10754 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010755 "Failed to transition to link up state, return 0x%x\n",
10756 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010757 ret = -EINVAL;
10758 break;
10759 }
10760 ppd->host_link_state = HLS_GOING_UP;
10761 break;
10762
10763 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10764 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10765 default:
10766 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010767 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010768 ret = -EINVAL;
10769 break;
10770 }
10771
Mike Marciniszyn77241052015-07-30 15:17:43 -040010772 goto done;
10773
10774unexpected:
10775 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010776 __func__, link_state_name(ppd->host_link_state),
10777 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010778 ret = -EINVAL;
10779
10780done:
10781 mutex_unlock(&ppd->hls_lock);
10782
10783 if (event.device)
10784 ib_dispatch_event(&event);
10785
10786 return ret;
10787}
10788
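/*
 * Illustrative sketch, not part of the driver: the order of states a
 * caller walks through to bring the link from offline to Active, based
 * on the transitions accepted by the switch above.  In practice several
 * of these calls are made from interrupt/workqueue handlers as the 8051
 * reports progress; error handling is omitted here.
 *
 *	set_link_state(ppd, HLS_DN_POLL);	   start polling / training
 *	set_link_state(ppd, HLS_VERIFY_CAP);	   after polling completes
 *	set_link_state(ppd, HLS_GOING_UP);	   request PLS_LINKUP
 *	set_link_state(ppd, HLS_UP_INIT);	   logical state -> INIT
 *	set_link_state(ppd, HLS_UP_ARMED);	   logical state -> ARMED
 *	set_link_state(ppd, HLS_UP_ACTIVE);	   logical state -> ACTIVE
 */
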
10789int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10790{
10791 u64 reg;
10792 int ret = 0;
10793
10794 switch (which) {
10795 case HFI1_IB_CFG_LIDLMC:
10796 set_lidlmc(ppd);
10797 break;
10798 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10799 /*
10800 * The VL Arbitrator high limit is sent in units of 4k
10801 * bytes, while HFI stores it in units of 64 bytes.
10802 */
Jubin John8638b772016-02-14 20:19:24 -080010803 val *= 4096 / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010804 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10805 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10806 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10807 break;
10808 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10809 /* HFI only supports POLL as the default link down state */
10810 if (val != HLS_DN_POLL)
10811 ret = -EINVAL;
10812 break;
10813 case HFI1_IB_CFG_OP_VLS:
10814 if (ppd->vls_operational != val) {
10815 ppd->vls_operational = val;
10816 if (!ppd->port)
10817 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010818 }
10819 break;
10820 /*
10821 * For link width, link width downgrade, and speed enable, always AND
10822 * the setting with what is actually supported. This has two benefits.
10823 * First, enabled can't have unsupported values, no matter what the
10824 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10825 * "fill in with your supported value" have all the bits in the
10826 * field set, so simply ANDing with supported has the desired result.
10827 */
10828 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10829 ppd->link_width_enabled = val & ppd->link_width_supported;
10830 break;
10831 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10832 ppd->link_width_downgrade_enabled =
10833 val & ppd->link_width_downgrade_supported;
10834 break;
10835 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10836 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10837 break;
10838 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10839 /*
10840 * HFI does not follow IB specs, save this value
10841 * so we can report it, if asked.
10842 */
10843 ppd->overrun_threshold = val;
10844 break;
10845 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10846 /*
10847 * HFI does not follow IB specs, save this value
10848 * so we can report it, if asked.
10849 */
10850 ppd->phy_error_threshold = val;
10851 break;
10852
10853 case HFI1_IB_CFG_MTU:
10854 set_send_length(ppd);
10855 break;
10856
10857 case HFI1_IB_CFG_PKEYS:
10858 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10859 set_partition_keys(ppd);
10860 break;
10861
10862 default:
10863 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10864 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010865 "%s: which %s, val 0x%x: not implemented\n",
10866 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010867 break;
10868 }
10869 return ret;
10870}
10871
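/*
 * Illustrative sketch, not part of the driver: programming the VL
 * arbitration high limit through hfi1_set_ib_cfg().  The value arrives
 * in units of 4 KB and is rescaled above to the 64-byte units the CSR
 * uses, so a limit of 2 (i.e. 8 KB) is written as 2 * (4096 / 64) = 128.
 *
 *	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT, 2);
 */
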
10872/* begin functions related to vl arbitration table caching */
10873static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10874{
10875 int i;
10876
10877 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10878 VL_ARB_LOW_PRIO_TABLE_SIZE);
10879 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10880 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10881
10882 /*
10883 * Note that we always return values directly from the
10884 * 'vl_arb_cache' (and do no CSR reads) in response to a
10885 * 'Get(VLArbTable)'. This is obviously correct after a
10886 * 'Set(VLArbTable)', since the cache will then be up to
10887 * date. But it's also correct prior to any 'Set(VLArbTable)'
10888	 * since then both the cache and the relevant h/w registers
10889 * will be zeroed.
10890 */
10891
10892 for (i = 0; i < MAX_PRIO_TABLE; i++)
10893 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10894}
10895
10896/*
10897 * vl_arb_lock_cache
10898 *
10899 * All other vl_arb_* functions should be called only after locking
10900 * the cache.
10901 */
10902static inline struct vl_arb_cache *
10903vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10904{
10905 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10906 return NULL;
10907 spin_lock(&ppd->vl_arb_cache[idx].lock);
10908 return &ppd->vl_arb_cache[idx];
10909}
10910
10911static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10912{
10913 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10914}
10915
10916static void vl_arb_get_cache(struct vl_arb_cache *cache,
10917 struct ib_vl_weight_elem *vl)
10918{
10919 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10920}
10921
10922static void vl_arb_set_cache(struct vl_arb_cache *cache,
10923 struct ib_vl_weight_elem *vl)
10924{
10925 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10926}
10927
10928static int vl_arb_match_cache(struct vl_arb_cache *cache,
10929 struct ib_vl_weight_elem *vl)
10930{
10931 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10932}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010933
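/*
 * Illustrative sketch, not part of the driver: the locking pattern the
 * cache accessors above require (see fm_get_table()/fm_set_table()
 * below for the real users).  "t" is a caller-supplied table buffer.
 *
 *	struct vl_arb_cache *vlc;
 *
 *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, t);		   copy cached entries out
 *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 */
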
Mike Marciniszyn77241052015-07-30 15:17:43 -040010934/* end functions related to vl arbitration table caching */
10935
10936static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10937 u32 size, struct ib_vl_weight_elem *vl)
10938{
10939 struct hfi1_devdata *dd = ppd->dd;
10940 u64 reg;
10941 unsigned int i, is_up = 0;
10942 int drain, ret = 0;
10943
10944 mutex_lock(&ppd->hls_lock);
10945
10946 if (ppd->host_link_state & HLS_UP)
10947 is_up = 1;
10948
10949 drain = !is_ax(dd) && is_up;
10950
10951 if (drain)
10952 /*
10953 * Before adjusting VL arbitration weights, empty per-VL
10954 * FIFOs, otherwise a packet whose VL weight is being
10955 * set to 0 could get stuck in a FIFO with no chance to
10956 * egress.
10957 */
10958 ret = stop_drain_data_vls(dd);
10959
10960 if (ret) {
10961 dd_dev_err(
10962 dd,
10963 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10964 __func__);
10965 goto err;
10966 }
10967
10968 for (i = 0; i < size; i++, vl++) {
10969 /*
10970 * NOTE: The low priority shift and mask are used here, but
10971 * they are the same for both the low and high registers.
10972 */
10973 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10974 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10975 | (((u64)vl->weight
10976 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10977 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10978 write_csr(dd, target + (i * 8), reg);
10979 }
10980 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10981
10982 if (drain)
10983 open_fill_data_vls(dd); /* reopen all VLs */
10984
10985err:
10986 mutex_unlock(&ppd->hls_lock);
10987
10988 return ret;
10989}
10990
10991/*
10992 * Read one credit merge VL register.
10993 */
10994static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10995 struct vl_limit *vll)
10996{
10997 u64 reg = read_csr(dd, csr);
10998
10999 vll->dedicated = cpu_to_be16(
11000 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11001 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11002 vll->shared = cpu_to_be16(
11003 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11004 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11005}
11006
11007/*
11008 * Read the current credit merge limits.
11009 */
11010static int get_buffer_control(struct hfi1_devdata *dd,
11011 struct buffer_control *bc, u16 *overall_limit)
11012{
11013 u64 reg;
11014 int i;
11015
11016 /* not all entries are filled in */
11017 memset(bc, 0, sizeof(*bc));
11018
11019 /* OPA and HFI have a 1-1 mapping */
11020 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080011021 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011022
11023 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11024 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11025
11026 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11027 bc->overall_shared_limit = cpu_to_be16(
11028 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11029 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11030 if (overall_limit)
11031 *overall_limit = (reg
11032 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11033 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11034 return sizeof(struct buffer_control);
11035}
11036
11037static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11038{
11039 u64 reg;
11040 int i;
11041
11042 /* each register contains 16 SC->VLnt mappings, 4 bits each */
11043 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11044 for (i = 0; i < sizeof(u64); i++) {
11045 u8 byte = *(((u8 *)&reg) + i);
11046
11047 dp->vlnt[2 * i] = byte & 0xf;
11048 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11049 }
11050
11051 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11052 for (i = 0; i < sizeof(u64); i++) {
11053 u8 byte = *(((u8 *)&reg) + i);
11054
11055 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11056 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11057 }
11058 return sizeof(struct sc2vlnt);
11059}
11060
11061static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11062 struct ib_vl_weight_elem *vl)
11063{
11064 unsigned int i;
11065
11066 for (i = 0; i < nelems; i++, vl++) {
11067 vl->vl = 0xf;
11068 vl->weight = 0;
11069 }
11070}
11071
11072static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11073{
11074 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080011075 DC_SC_VL_VAL(15_0,
11076 0, dp->vlnt[0] & 0xf,
11077 1, dp->vlnt[1] & 0xf,
11078 2, dp->vlnt[2] & 0xf,
11079 3, dp->vlnt[3] & 0xf,
11080 4, dp->vlnt[4] & 0xf,
11081 5, dp->vlnt[5] & 0xf,
11082 6, dp->vlnt[6] & 0xf,
11083 7, dp->vlnt[7] & 0xf,
11084 8, dp->vlnt[8] & 0xf,
11085 9, dp->vlnt[9] & 0xf,
11086 10, dp->vlnt[10] & 0xf,
11087 11, dp->vlnt[11] & 0xf,
11088 12, dp->vlnt[12] & 0xf,
11089 13, dp->vlnt[13] & 0xf,
11090 14, dp->vlnt[14] & 0xf,
11091 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011092 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080011093 DC_SC_VL_VAL(31_16,
11094 16, dp->vlnt[16] & 0xf,
11095 17, dp->vlnt[17] & 0xf,
11096 18, dp->vlnt[18] & 0xf,
11097 19, dp->vlnt[19] & 0xf,
11098 20, dp->vlnt[20] & 0xf,
11099 21, dp->vlnt[21] & 0xf,
11100 22, dp->vlnt[22] & 0xf,
11101 23, dp->vlnt[23] & 0xf,
11102 24, dp->vlnt[24] & 0xf,
11103 25, dp->vlnt[25] & 0xf,
11104 26, dp->vlnt[26] & 0xf,
11105 27, dp->vlnt[27] & 0xf,
11106 28, dp->vlnt[28] & 0xf,
11107 29, dp->vlnt[29] & 0xf,
11108 30, dp->vlnt[30] & 0xf,
11109 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011110}
11111
11112static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11113 u16 limit)
11114{
11115 if (limit != 0)
11116 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011117 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011118}
11119
11120/* change only the shared limit portion of SendCmGlobalCredit */
11121static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11122{
11123 u64 reg;
11124
11125 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11126 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11127 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11128 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11129}
11130
11131/* change only the total credit limit portion of SendCmGlobalCredit */
11132static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11133{
11134 u64 reg;
11135
11136 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11137 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11138 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11139 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11140}
11141
11142/* set the given per-VL shared limit */
11143static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11144{
11145 u64 reg;
11146 u32 addr;
11147
11148 if (vl < TXE_NUM_DATA_VL)
11149 addr = SEND_CM_CREDIT_VL + (8 * vl);
11150 else
11151 addr = SEND_CM_CREDIT_VL15;
11152
11153 reg = read_csr(dd, addr);
11154 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11155 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11156 write_csr(dd, addr, reg);
11157}
11158
11159/* set the given per-VL dedicated limit */
11160static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11161{
11162 u64 reg;
11163 u32 addr;
11164
11165 if (vl < TXE_NUM_DATA_VL)
11166 addr = SEND_CM_CREDIT_VL + (8 * vl);
11167 else
11168 addr = SEND_CM_CREDIT_VL15;
11169
11170 reg = read_csr(dd, addr);
11171 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11172 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11173 write_csr(dd, addr, reg);
11174}
11175
11176/* spin until the given per-VL status mask bits clear */
11177static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11178 const char *which)
11179{
11180 unsigned long timeout;
11181 u64 reg;
11182
11183 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11184 while (1) {
11185 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11186
11187 if (reg == 0)
11188 return; /* success */
11189 if (time_after(jiffies, timeout))
11190 break; /* timed out */
11191 udelay(1);
11192 }
11193
11194 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011195 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11196 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011197 /*
11198 * If this occurs, it is likely there was a credit loss on the link.
11199 * The only recovery from that is a link bounce.
11200 */
11201 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011202 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011203}
11204
11205/*
11206 * The number of credits on the VLs may be changed while everything
11207 * is "live", but the following algorithm must be followed due to
11208 * how the hardware is actually implemented. In particular,
11209 * Return_Credit_Status[] is the only correct status check.
11210 *
11211 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11212 * set Global_Shared_Credit_Limit = 0
11213 * use_all_vl = 1
11214 * mask0 = all VLs that are changing either dedicated or shared limits
11215 * set Shared_Limit[mask0] = 0
11216 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11217 * if (changing any dedicated limit)
11218 * mask1 = all VLs that are lowering dedicated limits
11219 * lower Dedicated_Limit[mask1]
11220 * spin until Return_Credit_Status[mask1] == 0
11221 * raise Dedicated_Limits
11222 * raise Shared_Limits
11223 * raise Global_Shared_Credit_Limit
11224 *
11225 * lower = if the new limit is lower, set the limit to the new value
11226 * raise = if the new limit is higher than the current value (may be changed
11227 * earlier in the algorithm), set the new limit to the new value
11228 */
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011229int set_buffer_control(struct hfi1_pportdata *ppd,
11230 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011231{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011232 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011233 u64 changing_mask, ld_mask, stat_mask;
11234 int change_count;
11235 int i, use_all_mask;
11236 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011237 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011238 /*
11239 * A0: add the variable any_shared_limit_changing below and in the
11240 * algorithm above. If removing A0 support, it can be removed.
11241 */
11242 int any_shared_limit_changing;
11243 struct buffer_control cur_bc;
11244 u8 changing[OPA_MAX_VLS];
11245 u8 lowering_dedicated[OPA_MAX_VLS];
11246 u16 cur_total;
11247 u32 new_total = 0;
11248 const u64 all_mask =
11249 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11250 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11251 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11252 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11253 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11254 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11255 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11256 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11257 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11258
11259#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11260#define NUM_USABLE_VLS 16 /* look at VL15 and less */
11261
Mike Marciniszyn77241052015-07-30 15:17:43 -040011262 /* find the new total credits, do sanity check on unused VLs */
11263 for (i = 0; i < OPA_MAX_VLS; i++) {
11264 if (valid_vl(i)) {
11265 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11266 continue;
11267 }
11268 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080011269 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011270 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080011271 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011272 new_bc->vl[i].dedicated = 0;
11273 new_bc->vl[i].shared = 0;
11274 }
11275 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050011276
Mike Marciniszyn77241052015-07-30 15:17:43 -040011277 /* fetch the current values */
11278 get_buffer_control(dd, &cur_bc, &cur_total);
11279
11280 /*
11281 * Create the masks we will use.
11282 */
11283 memset(changing, 0, sizeof(changing));
11284 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080011285 /*
11286 * NOTE: Assumes that the individual VL bits are adjacent and in
11287 * increasing order
11288 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011289 stat_mask =
11290 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11291 changing_mask = 0;
11292 ld_mask = 0;
11293 change_count = 0;
11294 any_shared_limit_changing = 0;
11295 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11296 if (!valid_vl(i))
11297 continue;
11298 this_shared_changing = new_bc->vl[i].shared
11299 != cur_bc.vl[i].shared;
11300 if (this_shared_changing)
11301 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080011302 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11303 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011304 changing[i] = 1;
11305 changing_mask |= stat_mask;
11306 change_count++;
11307 }
11308 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11309 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11310 lowering_dedicated[i] = 1;
11311 ld_mask |= stat_mask;
11312 }
11313 }
11314
11315 /* bracket the credit change with a total adjustment */
11316 if (new_total > cur_total)
11317 set_global_limit(dd, new_total);
11318
11319 /*
11320 * Start the credit change algorithm.
11321 */
11322 use_all_mask = 0;
11323 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011324 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11325 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011326 set_global_shared(dd, 0);
11327 cur_bc.overall_shared_limit = 0;
11328 use_all_mask = 1;
11329 }
11330
11331 for (i = 0; i < NUM_USABLE_VLS; i++) {
11332 if (!valid_vl(i))
11333 continue;
11334
11335 if (changing[i]) {
11336 set_vl_shared(dd, i, 0);
11337 cur_bc.vl[i].shared = 0;
11338 }
11339 }
11340
11341 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080011342 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011343
11344 if (change_count > 0) {
11345 for (i = 0; i < NUM_USABLE_VLS; i++) {
11346 if (!valid_vl(i))
11347 continue;
11348
11349 if (lowering_dedicated[i]) {
11350 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011351 be16_to_cpu(new_bc->
11352 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011353 cur_bc.vl[i].dedicated =
11354 new_bc->vl[i].dedicated;
11355 }
11356 }
11357
11358 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11359
11360 /* now raise all dedicated that are going up */
11361 for (i = 0; i < NUM_USABLE_VLS; i++) {
11362 if (!valid_vl(i))
11363 continue;
11364
11365 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11366 be16_to_cpu(cur_bc.vl[i].dedicated))
11367 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011368 be16_to_cpu(new_bc->
11369 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011370 }
11371 }
11372
11373 /* next raise all shared that are going up */
11374 for (i = 0; i < NUM_USABLE_VLS; i++) {
11375 if (!valid_vl(i))
11376 continue;
11377
11378 if (be16_to_cpu(new_bc->vl[i].shared) >
11379 be16_to_cpu(cur_bc.vl[i].shared))
11380 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11381 }
11382
11383 /* finally raise the global shared */
11384 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080011385 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011386 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011387 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011388
11389 /* bracket the credit change with a total adjustment */
11390 if (new_total < cur_total)
11391 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011392
11393 /*
11394 * Determine the actual number of operational VLS using the number of
11395 * dedicated and shared credits for each VL.
11396 */
11397 if (change_count > 0) {
11398 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11399 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11400 be16_to_cpu(new_bc->vl[i].shared) > 0)
11401 vl_count++;
11402 ppd->actual_vls_operational = vl_count;
11403 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11404 ppd->actual_vls_operational :
11405 ppd->vls_operational,
11406 NULL);
11407 if (ret == 0)
11408 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11409 ppd->actual_vls_operational :
11410 ppd->vls_operational, NULL);
11411 if (ret)
11412 return ret;
11413 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011414 return 0;
11415}
11416
11417/*
11418 * Read the given fabric manager table. Return the size of the
11419 * table (in bytes) on success, and a negative error code on
11420 * failure.
11421 */
11422int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11423
11424{
11425 int size;
11426 struct vl_arb_cache *vlc;
11427
11428 switch (which) {
11429 case FM_TBL_VL_HIGH_ARB:
11430 size = 256;
11431 /*
11432 * OPA specifies 128 elements (of 2 bytes each), though
11433 * HFI supports only 16 elements in h/w.
11434 */
11435 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11436 vl_arb_get_cache(vlc, t);
11437 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11438 break;
11439 case FM_TBL_VL_LOW_ARB:
11440 size = 256;
11441 /*
11442 * OPA specifies 128 elements (of 2 bytes each), though
11443 * HFI supports only 16 elements in h/w.
11444 */
11445 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11446 vl_arb_get_cache(vlc, t);
11447 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11448 break;
11449 case FM_TBL_BUFFER_CONTROL:
11450 size = get_buffer_control(ppd->dd, t, NULL);
11451 break;
11452 case FM_TBL_SC2VLNT:
11453 size = get_sc2vlnt(ppd->dd, t);
11454 break;
11455 case FM_TBL_VL_PREEMPT_ELEMS:
11456 size = 256;
11457 /* OPA specifies 128 elements, of 2 bytes each */
11458 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11459 break;
11460 case FM_TBL_VL_PREEMPT_MATRIX:
11461 size = 256;
11462 /*
11463 * OPA specifies that this is the same size as the VL
11464 * arbitration tables (i.e., 256 bytes).
11465 */
11466 break;
11467 default:
11468 return -EINVAL;
11469 }
11470 return size;
11471}
11472
11473/*
11474 * Write the given fabric manager table.
11475 */
11476int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11477{
11478 int ret = 0;
11479 struct vl_arb_cache *vlc;
11480
11481 switch (which) {
11482 case FM_TBL_VL_HIGH_ARB:
11483 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11484 if (vl_arb_match_cache(vlc, t)) {
11485 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11486 break;
11487 }
11488 vl_arb_set_cache(vlc, t);
11489 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11490 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11491 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11492 break;
11493 case FM_TBL_VL_LOW_ARB:
11494 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11495 if (vl_arb_match_cache(vlc, t)) {
11496 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11497 break;
11498 }
11499 vl_arb_set_cache(vlc, t);
11500 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11501 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11502 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11503 break;
11504 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011505 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011506 break;
11507 case FM_TBL_SC2VLNT:
11508 set_sc2vlnt(ppd->dd, t);
11509 break;
11510 default:
11511 ret = -EINVAL;
11512 }
11513 return ret;
11514}
11515
11516/*
11517 * Disable all data VLs.
11518 *
11519 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11520 */
11521static int disable_data_vls(struct hfi1_devdata *dd)
11522{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011523 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011524 return 1;
11525
11526 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11527
11528 return 0;
11529}
11530
11531/*
11532 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11533 * Just re-enables all data VLs (the "fill" part happens
11534 * automatically - the name was chosen for symmetry with
11535 * stop_drain_data_vls()).
11536 *
11537 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11538 */
11539int open_fill_data_vls(struct hfi1_devdata *dd)
11540{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011541 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011542 return 1;
11543
11544 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11545
11546 return 0;
11547}
11548
11549/*
11550 * drain_data_vls() - assumes that disable_data_vls() has been called;
11551 * waits for the occupancy of the per-VL FIFOs, for all contexts and SDMA
11552 * engines, to drop to 0.
11553 */
11554static void drain_data_vls(struct hfi1_devdata *dd)
11555{
11556 sc_wait(dd);
11557 sdma_wait(dd);
11558 pause_for_credit_return(dd);
11559}
11560
11561/*
11562 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11563 *
11564 * Use open_fill_data_vls() to resume using data VLs. This pair is
11565 * meant to be used like this:
11566 *
11567 * stop_drain_data_vls(dd);
11568 * // do things with per-VL resources
11569 * open_fill_data_vls(dd);
11570 */
11571int stop_drain_data_vls(struct hfi1_devdata *dd)
11572{
11573 int ret;
11574
11575 ret = disable_data_vls(dd);
11576 if (ret == 0)
11577 drain_data_vls(dd);
11578
11579 return ret;
11580}
11581
11582/*
11583 * Convert a nanosecond time to a cclock count. No matter how slow
11584 * the cclock, a non-zero ns will always have a non-zero result.
11585 */
11586u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11587{
11588 u32 cclocks;
11589
11590 if (dd->icode == ICODE_FPGA_EMULATION)
11591 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11592 else /* simulation pretends to be ASIC */
11593 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11594 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11595 cclocks = 1;
11596 return cclocks;
11597}
11598
11599/*
11600 * Convert a cclock count to nanoseconds. No matter how slow
11601 * the cclock, a non-zero cclocks will always have a non-zero result.
11602 */
11603u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11604{
11605 u32 ns;
11606
11607 if (dd->icode == ICODE_FPGA_EMULATION)
11608 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11609 else /* simulation pretends to be ASIC */
11610 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11611 if (cclocks && !ns)
11612 ns = 1;
11613 return ns;
11614}
11615
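/*
 * Illustrative sketch, not part of the driver: both conversions above
 * round down but clamp non-zero inputs to a result of at least 1, so a
 * round trip may lose precision but never turns a real delay into 0.
 *
 *	u32 cc = ns_to_cclock(dd, 1);		   at least 1 cclock
 *	u32 ns = cclock_to_ns(dd, cc);		   at least 1 ns
 */
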
11616/*
11617 * Dynamically adjust the receive interrupt timeout for a context based on
11618 * incoming packet rate.
11619 *
11620 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11621 */
11622static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11623{
11624 struct hfi1_devdata *dd = rcd->dd;
11625 u32 timeout = rcd->rcvavail_timeout;
11626
11627 /*
11628 * This algorithm doubles or halves the timeout depending on whether
11629	 * the number of packets received in this interrupt was less than or
11630	 * greater than or equal to the interrupt count.
11631 *
11632 * The calculations below do not allow a steady state to be achieved.
11633	 * Only at the endpoints is it possible to have an unchanging
11634 * timeout.
11635 */
11636 if (npkts < rcv_intr_count) {
11637 /*
11638 * Not enough packets arrived before the timeout, adjust
11639 * timeout downward.
11640 */
11641 if (timeout < 2) /* already at minimum? */
11642 return;
11643 timeout >>= 1;
11644 } else {
11645 /*
11646 * More than enough packets arrived before the timeout, adjust
11647 * timeout upward.
11648 */
11649 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11650 return;
11651 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11652 }
11653
11654 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011655 /*
11656 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11657 * been verified to be in range
11658 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011659 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011660 (u64)timeout <<
11661 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011662}
11663
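/*
 * Illustrative trace, not part of the driver: assuming rcv_intr_count
 * is 16, rcd->rcvavail_timeout starts at 8, and dd->rcv_intr_timeout_csr
 * is large enough not to cap the result:
 *
 *	adjust_rcv_timeout(rcd, 100);		   100 >= 16: timeout -> 16
 *	adjust_rcv_timeout(rcd, 40);		    40 >= 16: timeout -> 32
 *	adjust_rcv_timeout(rcd, 3);		     3 <  16: timeout -> 16
 *
 * The doubling stops at dd->rcv_intr_timeout_csr and the halving stops
 * once the timeout drops below 2.
 */
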
11664void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11665 u32 intr_adjust, u32 npkts)
11666{
11667 struct hfi1_devdata *dd = rcd->dd;
11668 u64 reg;
11669 u32 ctxt = rcd->ctxt;
11670
11671 /*
11672 * Need to write timeout register before updating RcvHdrHead to ensure
11673 * that a new value is used when the HW decides to restart counting.
11674 */
11675 if (intr_adjust)
11676 adjust_rcv_timeout(rcd, npkts);
11677 if (updegr) {
11678 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11679 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11680 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11681 }
11682 mmiowb();
11683 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11684 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11685 << RCV_HDR_HEAD_HEAD_SHIFT);
11686 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11687 mmiowb();
11688}
11689
11690u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11691{
11692 u32 head, tail;
11693
11694 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11695 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11696
11697 if (rcd->rcvhdrtail_kvaddr)
11698 tail = get_rcvhdrtail(rcd);
11699 else
11700 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11701
11702 return head == tail;
11703}
11704
11705/*
11706 * Context Control and Receive Array encoding for buffer size:
11707 * 0x0 invalid
11708 * 0x1 4 KB
11709 * 0x2 8 KB
11710 * 0x3 16 KB
11711 * 0x4 32 KB
11712 * 0x5 64 KB
11713 * 0x6 128 KB
11714 * 0x7 256 KB
11715 * 0x8 512 KB (Receive Array only)
11716 * 0x9 1 MB (Receive Array only)
11717 * 0xa 2 MB (Receive Array only)
11718 *
11719 * 0xB-0xF - reserved (Receive Array only)
11720 *
11721 *
11722 * This routine assumes that the value has already been sanity checked.
11723 */
11724static u32 encoded_size(u32 size)
11725{
11726 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011727 case 4 * 1024: return 0x1;
11728 case 8 * 1024: return 0x2;
11729 case 16 * 1024: return 0x3;
11730 case 32 * 1024: return 0x4;
11731 case 64 * 1024: return 0x5;
11732 case 128 * 1024: return 0x6;
11733 case 256 * 1024: return 0x7;
11734 case 512 * 1024: return 0x8;
11735 case 1 * 1024 * 1024: return 0x9;
11736 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011737 }
11738 return 0x1; /* if invalid, go with the minimum size */
11739}
11740
Michael J. Ruhl22505632017-07-24 07:46:06 -070011741void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11742 struct hfi1_ctxtdata *rcd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011743{
Mike Marciniszyn77241052015-07-30 15:17:43 -040011744 u64 rcvctrl, reg;
11745 int did_enable = 0;
Michael J. Ruhl22505632017-07-24 07:46:06 -070011746 u16 ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011747
Mike Marciniszyn77241052015-07-30 15:17:43 -040011748 if (!rcd)
11749 return;
11750
Michael J. Ruhl22505632017-07-24 07:46:06 -070011751 ctxt = rcd->ctxt;
11752
Mike Marciniszyn77241052015-07-30 15:17:43 -040011753 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11754
11755 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11756 /* if the context already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011757 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11758 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011759 /* reset the tail and hdr addresses, and sequence count */
11760 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011761 rcd->rcvhdrq_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011762 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11763 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011764 rcd->rcvhdrqtailaddr_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011765 rcd->seq_cnt = 1;
11766
11767 /* reset the cached receive header queue head value */
11768 rcd->head = 0;
11769
11770 /*
11771 * Zero the receive header queue so we don't get false
11772 * positives when checking the sequence number. The
11773 * sequence numbers could land exactly on the same spot.
11774 * E.g. a rcd restart before the receive header wrapped.
11775 */
11776 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11777
11778 /* starting timeout */
11779 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11780
11781 /* enable the context */
11782 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11783
11784 /* clean the egr buffer size first */
11785 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11786 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11787 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11788 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11789
11790 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11791 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11792 did_enable = 1;
11793
11794 /* zero RcvEgrIndexHead */
11795 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11796
11797 /* set eager count and base index */
11798 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11799 & RCV_EGR_CTRL_EGR_CNT_MASK)
11800 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11801 (((rcd->eager_base >> RCV_SHIFT)
11802 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11803 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11804 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11805
11806 /*
11807 * Set TID (expected) count and base index.
11808 * rcd->expected_count is set to individual RcvArray entries,
11809 * not pairs, and the CSR takes a pair-count in groups of
11810 * four, so divide by 8.
11811 */
11812 reg = (((rcd->expected_count >> RCV_SHIFT)
11813 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11814 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11815 (((rcd->expected_base >> RCV_SHIFT)
11816 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11817 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11818 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011819 if (ctxt == HFI1_CTRL_CTXT)
11820 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011821 }
11822 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11823 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011824 /*
11825		 * When a receive context is being disabled, turn on tail
11826		 * update with a dummy tail address and then disable the
11827		 * receive context.
11828 */
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011829 if (dd->rcvhdrtail_dummy_dma) {
Mark F. Brown46b010d2015-11-09 19:18:20 -050011830 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011831 dd->rcvhdrtail_dummy_dma);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011832 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011833 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11834 }
11835
Mike Marciniszyn77241052015-07-30 15:17:43 -040011836 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11837 }
11838 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11839 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11840 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11841 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011842 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011843 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011844 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11845 /* See comment on RcvCtxtCtrl.TailUpd above */
11846 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11847 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11848 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011849 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11850 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11851 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11852 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11853 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011854 /*
11855 * In one-packet-per-eager mode, the size comes from
11856 * the RcvArray entry.
11857 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011858 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11859 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11860 }
11861 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11862 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11863 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11864 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11865 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11866 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11867 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11868 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11869 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11870 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11871 rcd->rcvctrl = rcvctrl;
11872 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11873 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11874
11875 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011876 if (did_enable &&
11877 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011878 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11879 if (reg != 0) {
11880 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011881 ctxt, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011882 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11883 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11884 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11885 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11886 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11887 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011888 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011889 }
11890 }
11891
11892 if (did_enable) {
11893 /*
11894 * The interrupt timeout and count must be set after
11895 * the context is enabled to take effect.
11896 */
11897 /* set interrupt timeout */
11898 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011899 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011900 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11901
11902 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11903 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11904 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11905 }
11906
11907 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11908 /*
11909 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011910		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11911 * so it doesn't contain an address that is invalid.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011912 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011913 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011914 dd->rcvhdrtail_dummy_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011915}
11916
Dean Luick582e05c2016-02-18 11:13:01 -080011917u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011918{
11919 int ret;
11920 u64 val = 0;
11921
11922 if (namep) {
11923 ret = dd->cntrnameslen;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011924 *namep = dd->cntrnames;
11925 } else {
11926 const struct cntr_entry *entry;
11927 int i, j;
11928
11929 ret = (dd->ndevcntrs) * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011930
11931 /* Get the start of the block of counters */
11932 *cntrp = dd->cntrs;
11933
11934 /*
11935 * Now go and fill in each counter in the block.
11936 */
11937 for (i = 0; i < DEV_CNTR_LAST; i++) {
11938 entry = &dev_cntrs[i];
11939 hfi1_cdbg(CNTR, "reading %s", entry->name);
11940 if (entry->flags & CNTR_DISABLED) {
11941 /* Nothing */
11942 hfi1_cdbg(CNTR, "\tDisabled\n");
11943 } else {
11944 if (entry->flags & CNTR_VL) {
11945 hfi1_cdbg(CNTR, "\tPer VL\n");
11946 for (j = 0; j < C_VL_COUNT; j++) {
11947 val = entry->rw_cntr(entry,
11948 dd, j,
11949 CNTR_MODE_R,
11950 0);
11951 hfi1_cdbg(
11952 CNTR,
11953 "\t\tRead 0x%llx for %d\n",
11954 val, j);
11955 dd->cntrs[entry->offset + j] =
11956 val;
11957 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011958 } else if (entry->flags & CNTR_SDMA) {
11959 hfi1_cdbg(CNTR,
11960 "\t Per SDMA Engine\n");
11961 for (j = 0; j < dd->chip_sdma_engines;
11962 j++) {
11963 val =
11964 entry->rw_cntr(entry, dd, j,
11965 CNTR_MODE_R, 0);
11966 hfi1_cdbg(CNTR,
11967 "\t\tRead 0x%llx for %d\n",
11968 val, j);
11969 dd->cntrs[entry->offset + j] =
11970 val;
11971 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011972 } else {
11973 val = entry->rw_cntr(entry, dd,
11974 CNTR_INVALID_VL,
11975 CNTR_MODE_R, 0);
11976 dd->cntrs[entry->offset] = val;
11977 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11978 }
11979 }
11980 }
11981 }
11982 return ret;
11983}
11984
11985/*
11986 * Used by sysfs to create files for hfi stats to read
11987 */
Dean Luick582e05c2016-02-18 11:13:01 -080011988u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011989{
11990 int ret;
11991 u64 val = 0;
11992
11993 if (namep) {
Dean Luick582e05c2016-02-18 11:13:01 -080011994 ret = ppd->dd->portcntrnameslen;
11995 *namep = ppd->dd->portcntrnames;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011996 } else {
11997 const struct cntr_entry *entry;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011998 int i, j;
11999
Dean Luick582e05c2016-02-18 11:13:01 -080012000 ret = ppd->dd->nportcntrs * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012001 *cntrp = ppd->cntrs;
12002
12003 for (i = 0; i < PORT_CNTR_LAST; i++) {
12004 entry = &port_cntrs[i];
12005 hfi1_cdbg(CNTR, "reading %s", entry->name);
12006 if (entry->flags & CNTR_DISABLED) {
12007 /* Nothing */
12008 hfi1_cdbg(CNTR, "\tDisabled\n");
12009 continue;
12010 }
12011
12012 if (entry->flags & CNTR_VL) {
12013 hfi1_cdbg(CNTR, "\tPer VL");
12014 for (j = 0; j < C_VL_COUNT; j++) {
12015 val = entry->rw_cntr(entry, ppd, j,
12016 CNTR_MODE_R,
12017 0);
12018 hfi1_cdbg(
12019 CNTR,
12020 "\t\tRead 0x%llx for %d",
12021 val, j);
12022 ppd->cntrs[entry->offset + j] = val;
12023 }
12024 } else {
12025 val = entry->rw_cntr(entry, ppd,
12026 CNTR_INVALID_VL,
12027 CNTR_MODE_R,
12028 0);
12029 ppd->cntrs[entry->offset] = val;
12030 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12031 }
12032 }
12033 }
12034 return ret;
12035}
12036
12037static void free_cntrs(struct hfi1_devdata *dd)
12038{
12039 struct hfi1_pportdata *ppd;
12040 int i;
12041
12042 if (dd->synth_stats_timer.data)
12043 del_timer_sync(&dd->synth_stats_timer);
12044 dd->synth_stats_timer.data = 0;
12045 ppd = (struct hfi1_pportdata *)(dd + 1);
12046 for (i = 0; i < dd->num_pports; i++, ppd++) {
12047 kfree(ppd->cntrs);
12048 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080012049 free_percpu(ppd->ibport_data.rvp.rc_acks);
12050 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12051 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012052 ppd->cntrs = NULL;
12053 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080012054 ppd->ibport_data.rvp.rc_acks = NULL;
12055 ppd->ibport_data.rvp.rc_qacks = NULL;
12056 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012057 }
12058 kfree(dd->portcntrnames);
12059 dd->portcntrnames = NULL;
12060 kfree(dd->cntrs);
12061 dd->cntrs = NULL;
12062 kfree(dd->scntrs);
12063 dd->scntrs = NULL;
12064 kfree(dd->cntrnames);
12065 dd->cntrnames = NULL;
Tadeusz Struk22546b72017-04-28 10:40:02 -070012066 if (dd->update_cntr_wq) {
12067 destroy_workqueue(dd->update_cntr_wq);
12068 dd->update_cntr_wq = NULL;
12069 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012070}
12071
Mike Marciniszyn77241052015-07-30 15:17:43 -040012072static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12073 u64 *psval, void *context, int vl)
12074{
12075 u64 val;
12076 u64 sval = *psval;
12077
12078 if (entry->flags & CNTR_DISABLED) {
12079 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12080 return 0;
12081 }
12082
12083 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12084
12085 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12086
	12087	/* If it's a synthetic counter, there is more work to do */
12088 if (entry->flags & CNTR_SYNTH) {
12089 if (sval == CNTR_MAX) {
12090 /* No need to read already saturated */
12091 return CNTR_MAX;
12092 }
12093
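		/*
		 * Worked example (hypothetical values): with a saved sval of
		 * 0x100000010, upper is 0x1 and lower is 0x10.  If the
		 * hardware later returns val = 0x5, then lower > val means
		 * the 32-bit register wrapped, so upper becomes 0x2 and the
		 * synthesized 64-bit value below is 0x200000005.
		 */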
12094 if (entry->flags & CNTR_32BIT) {
12095 /* 32bit counters can wrap multiple times */
12096 u64 upper = sval >> 32;
12097 u64 lower = (sval << 32) >> 32;
12098
12099 if (lower > val) { /* hw wrapped */
12100 if (upper == CNTR_32BIT_MAX)
12101 val = CNTR_MAX;
12102 else
12103 upper++;
12104 }
12105
12106 if (val != CNTR_MAX)
12107 val = (upper << 32) | val;
12108
12109 } else {
12110 /* If we rolled we are saturated */
12111 if ((val < sval) || (val > CNTR_MAX))
12112 val = CNTR_MAX;
12113 }
12114 }
12115
12116 *psval = val;
12117
12118 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12119
12120 return val;
12121}
12122
12123static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12124 struct cntr_entry *entry,
12125 u64 *psval, void *context, int vl, u64 data)
12126{
12127 u64 val;
12128
12129 if (entry->flags & CNTR_DISABLED) {
12130 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12131 return 0;
12132 }
12133
12134 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12135
12136 if (entry->flags & CNTR_SYNTH) {
12137 *psval = data;
12138 if (entry->flags & CNTR_32BIT) {
12139 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12140 (data << 32) >> 32);
12141 val = data; /* return the full 64bit value */
12142 } else {
12143 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12144 data);
12145 }
12146 } else {
12147 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12148 }
12149
12150 *psval = val;
12151
12152 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12153
12154 return val;
12155}
12156
12157u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12158{
12159 struct cntr_entry *entry;
12160 u64 *sval;
12161
12162 entry = &dev_cntrs[index];
12163 sval = dd->scntrs + entry->offset;
12164
12165 if (vl != CNTR_INVALID_VL)
12166 sval += vl;
12167
12168 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12169}
12170
12171u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12172{
12173 struct cntr_entry *entry;
12174 u64 *sval;
12175
12176 entry = &dev_cntrs[index];
12177 sval = dd->scntrs + entry->offset;
12178
12179 if (vl != CNTR_INVALID_VL)
12180 sval += vl;
12181
12182 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12183}
12184
12185u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12186{
12187 struct cntr_entry *entry;
12188 u64 *sval;
12189
12190 entry = &port_cntrs[index];
12191 sval = ppd->scntrs + entry->offset;
12192
12193 if (vl != CNTR_INVALID_VL)
12194 sval += vl;
12195
12196 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12197 (index <= C_RCV_HDR_OVF_LAST)) {
12198 /* We do not want to bother for disabled contexts */
12199 return 0;
12200 }
12201
12202 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12203}
12204
12205u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12206{
12207 struct cntr_entry *entry;
12208 u64 *sval;
12209
12210 entry = &port_cntrs[index];
12211 sval = ppd->scntrs + entry->offset;
12212
12213 if (vl != CNTR_INVALID_VL)
12214 sval += vl;
12215
12216 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12217 (index <= C_RCV_HDR_OVF_LAST)) {
12218 /* We do not want to bother for disabled contexts */
12219 return 0;
12220 }
12221
12222 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12223}
12224
Tadeusz Struk22546b72017-04-28 10:40:02 -070012225static void do_update_synth_timer(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012226{
12227 u64 cur_tx;
12228 u64 cur_rx;
12229 u64 total_flits;
12230 u8 update = 0;
12231 int i, j, vl;
12232 struct hfi1_pportdata *ppd;
12233 struct cntr_entry *entry;
Tadeusz Struk22546b72017-04-28 10:40:02 -070012234 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12235 update_cntr_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012236
12237 /*
	12238	 * Rather than keep beating on the CSRs, pick a minimal set that we can
	12239	 * check to watch for a potential rollover. We can do this by looking at
	12240	 * the number of flits sent/received. If the total number of flits
	12241	 * exceeds 32 bits then we have to iterate all the counters and update.
12242 */
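	/*
	 * Example (hypothetical values): with last_tx = 0x1000,
	 * last_rx = 0x2000, cur_tx = 0x80000000 and cur_rx = 0x90000000,
	 * total_flits = 0x7FFFF000 + 0x8FFFE000 = 0x10FFFD000, which is
	 * above CNTR_32BIT_MAX, so every dd and ppd counter is re-read
	 * before any 32-bit register can wrap unnoticed.
	 */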
12243 entry = &dev_cntrs[C_DC_RCV_FLITS];
12244 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12245
12246 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12247 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12248
12249 hfi1_cdbg(
12250 CNTR,
12251 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12252 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12253
12254 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12255 /*
12256 * May not be strictly necessary to update but it won't hurt and
12257 * simplifies the logic here.
12258 */
12259 update = 1;
12260 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12261 dd->unit);
12262 } else {
12263 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12264 hfi1_cdbg(CNTR,
12265 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12266 total_flits, (u64)CNTR_32BIT_MAX);
12267 if (total_flits >= CNTR_32BIT_MAX) {
12268 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12269 dd->unit);
12270 update = 1;
12271 }
12272 }
12273
12274 if (update) {
12275 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12276 for (i = 0; i < DEV_CNTR_LAST; i++) {
12277 entry = &dev_cntrs[i];
12278 if (entry->flags & CNTR_VL) {
12279 for (vl = 0; vl < C_VL_COUNT; vl++)
12280 read_dev_cntr(dd, i, vl);
12281 } else {
12282 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12283 }
12284 }
12285 ppd = (struct hfi1_pportdata *)(dd + 1);
12286 for (i = 0; i < dd->num_pports; i++, ppd++) {
12287 for (j = 0; j < PORT_CNTR_LAST; j++) {
12288 entry = &port_cntrs[j];
12289 if (entry->flags & CNTR_VL) {
12290 for (vl = 0; vl < C_VL_COUNT; vl++)
12291 read_port_cntr(ppd, j, vl);
12292 } else {
12293 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12294 }
12295 }
12296 }
12297
12298 /*
12299 * We want the value in the register. The goal is to keep track
12300 * of the number of "ticks" not the counter value. In other
12301 * words if the register rolls we want to notice it and go ahead
12302 * and force an update.
12303 */
12304 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12305 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12306 CNTR_MODE_R, 0);
12307
12308 entry = &dev_cntrs[C_DC_RCV_FLITS];
12309 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12310 CNTR_MODE_R, 0);
12311
12312 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12313 dd->unit, dd->last_tx, dd->last_rx);
12314
12315 } else {
12316 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12317 }
Tadeusz Struk22546b72017-04-28 10:40:02 -070012318}
Mike Marciniszyn77241052015-07-30 15:17:43 -040012319
Tadeusz Struk22546b72017-04-28 10:40:02 -070012320static void update_synth_timer(unsigned long opaque)
12321{
12322 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12323
12324 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
Bart Van Assche48a0cc132016-06-03 12:09:56 -070012325 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012326}
12327
Jianxin Xiong09a79082016-10-25 13:12:40 -070012328#define C_MAX_NAME 16 /* 15 chars + one for \0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012329static int init_cntrs(struct hfi1_devdata *dd)
12330{
Dean Luickc024c552016-01-11 18:30:57 -050012331 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012332 size_t sz;
12333 char *p;
12334 char name[C_MAX_NAME];
12335 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012336 const char *bit_type_32 = ",32";
12337 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012338
12339 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053012340 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12341 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012342
12343 /***********************/
12344 /* per device counters */
12345 /***********************/
12346
	12347	/* size names and determine how many we have */
12348 dd->ndevcntrs = 0;
12349 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012350
12351 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012352 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12353 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12354 continue;
12355 }
12356
12357 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050012358 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012359 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012360 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012361 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012362 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012363 /* Add ",32" for 32-bit counters */
12364 if (dev_cntrs[i].flags & CNTR_32BIT)
12365 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012366 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012367 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012368 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012369 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050012370 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012371 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012372 snprintf(name, C_MAX_NAME, "%s%d",
12373 dev_cntrs[i].name, j);
12374 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012375 /* Add ",32" for 32-bit counters */
12376 if (dev_cntrs[i].flags & CNTR_32BIT)
12377 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012378 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012379 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012380 }
12381 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012382 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012383 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012384 /* Add ",32" for 32-bit counters */
12385 if (dev_cntrs[i].flags & CNTR_32BIT)
12386 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050012387 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012388 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012389 }
12390 }
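	/*
	 * Sizing example (hypothetical counter): in the loop above, a
	 * per-VL, 32-bit device counter named "FooCnt" adds, for each of
	 * the C_VL_COUNT VLs, strlen("FooCntN") + strlen(",32") + 1 bytes
	 * to sz: the name, the ",32" suffix, and the newline written in
	 * the fill loop below.
	 */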
12391
12392 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050012393 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012394 if (!dd->cntrs)
12395 goto bail;
12396
Dean Luickc024c552016-01-11 18:30:57 -050012397 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012398 if (!dd->scntrs)
12399 goto bail;
12400
Mike Marciniszyn77241052015-07-30 15:17:43 -040012401 /* allocate space for the counter names */
12402 dd->cntrnameslen = sz;
12403 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12404 if (!dd->cntrnames)
12405 goto bail;
12406
12407 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050012408 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012409 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12410 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012411 } else if (dev_cntrs[i].flags & CNTR_VL) {
12412 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012413 snprintf(name, C_MAX_NAME, "%s%d",
12414 dev_cntrs[i].name,
12415 vl_from_idx(j));
12416 memcpy(p, name, strlen(name));
12417 p += strlen(name);
12418
12419 /* Counter is 32 bits */
12420 if (dev_cntrs[i].flags & CNTR_32BIT) {
12421 memcpy(p, bit_type_32, bit_type_32_sz);
12422 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012423 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012424
Mike Marciniszyn77241052015-07-30 15:17:43 -040012425 *p++ = '\n';
12426 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012427 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12428 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012429 snprintf(name, C_MAX_NAME, "%s%d",
12430 dev_cntrs[i].name, j);
12431 memcpy(p, name, strlen(name));
12432 p += strlen(name);
12433
12434 /* Counter is 32 bits */
12435 if (dev_cntrs[i].flags & CNTR_32BIT) {
12436 memcpy(p, bit_type_32, bit_type_32_sz);
12437 p += bit_type_32_sz;
12438 }
12439
12440 *p++ = '\n';
12441 }
12442 } else {
12443 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12444 p += strlen(dev_cntrs[i].name);
12445
12446 /* Counter is 32 bits */
12447 if (dev_cntrs[i].flags & CNTR_32BIT) {
12448 memcpy(p, bit_type_32, bit_type_32_sz);
12449 p += bit_type_32_sz;
12450 }
12451
12452 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040012453 }
12454 }
12455
12456 /*********************/
12457 /* per port counters */
12458 /*********************/
12459
12460 /*
12461 * Go through the counters for the overflows and disable the ones we
12462 * don't need. This varies based on platform so we need to do it
12463 * dynamically here.
12464 */
12465 rcv_ctxts = dd->num_rcv_contexts;
12466 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12467 i <= C_RCV_HDR_OVF_LAST; i++) {
12468 port_cntrs[i].flags |= CNTR_DISABLED;
12469 }
12470
	12471	/* size port counter names and determine how many we have */
12472 sz = 0;
12473 dd->nportcntrs = 0;
12474 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012475 if (port_cntrs[i].flags & CNTR_DISABLED) {
12476 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12477 continue;
12478 }
12479
12480 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012481 port_cntrs[i].offset = dd->nportcntrs;
12482 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012483 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012484 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012485 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012486 /* Add ",32" for 32-bit counters */
12487 if (port_cntrs[i].flags & CNTR_32BIT)
12488 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012489 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012490 dd->nportcntrs++;
12491 }
12492 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012493 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012494 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012495 /* Add ",32" for 32-bit counters */
12496 if (port_cntrs[i].flags & CNTR_32BIT)
12497 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012498 port_cntrs[i].offset = dd->nportcntrs;
12499 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012500 }
12501 }
12502
12503 /* allocate space for the counter names */
12504 dd->portcntrnameslen = sz;
12505 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12506 if (!dd->portcntrnames)
12507 goto bail;
12508
12509 /* fill in port cntr names */
12510 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12511 if (port_cntrs[i].flags & CNTR_DISABLED)
12512 continue;
12513
12514 if (port_cntrs[i].flags & CNTR_VL) {
12515 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012516 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012517 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012518 memcpy(p, name, strlen(name));
12519 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012520
12521 /* Counter is 32 bits */
12522 if (port_cntrs[i].flags & CNTR_32BIT) {
12523 memcpy(p, bit_type_32, bit_type_32_sz);
12524 p += bit_type_32_sz;
12525 }
12526
Mike Marciniszyn77241052015-07-30 15:17:43 -040012527 *p++ = '\n';
12528 }
12529 } else {
12530 memcpy(p, port_cntrs[i].name,
12531 strlen(port_cntrs[i].name));
12532 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012533
12534 /* Counter is 32 bits */
12535 if (port_cntrs[i].flags & CNTR_32BIT) {
12536 memcpy(p, bit_type_32, bit_type_32_sz);
12537 p += bit_type_32_sz;
12538 }
12539
Mike Marciniszyn77241052015-07-30 15:17:43 -040012540 *p++ = '\n';
12541 }
12542 }
12543
12544 /* allocate per port storage for counter values */
12545 ppd = (struct hfi1_pportdata *)(dd + 1);
12546 for (i = 0; i < dd->num_pports; i++, ppd++) {
12547 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12548 if (!ppd->cntrs)
12549 goto bail;
12550
12551 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12552 if (!ppd->scntrs)
12553 goto bail;
12554 }
12555
12556 /* CPU counters need to be allocated and zeroed */
12557 if (init_cpu_counters(dd))
12558 goto bail;
12559
Tadeusz Struk22546b72017-04-28 10:40:02 -070012560 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12561 WQ_MEM_RECLAIM, dd->unit);
12562 if (!dd->update_cntr_wq)
12563 goto bail;
12564
12565 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12566
Mike Marciniszyn77241052015-07-30 15:17:43 -040012567 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12568 return 0;
12569bail:
12570 free_cntrs(dd);
12571 return -ENOMEM;
12572}
12573
Mike Marciniszyn77241052015-07-30 15:17:43 -040012574static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12575{
12576 switch (chip_lstate) {
12577 default:
12578 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012579 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12580 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012581 /* fall through */
12582 case LSTATE_DOWN:
12583 return IB_PORT_DOWN;
12584 case LSTATE_INIT:
12585 return IB_PORT_INIT;
12586 case LSTATE_ARMED:
12587 return IB_PORT_ARMED;
12588 case LSTATE_ACTIVE:
12589 return IB_PORT_ACTIVE;
12590 }
12591}
12592
12593u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12594{
12595 /* look at the HFI meta-states only */
12596 switch (chip_pstate & 0xf0) {
12597 default:
12598 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012599 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012600 /* fall through */
12601 case PLS_DISABLED:
12602 return IB_PORTPHYSSTATE_DISABLED;
12603 case PLS_OFFLINE:
12604 return OPA_PORTPHYSSTATE_OFFLINE;
12605 case PLS_POLLING:
12606 return IB_PORTPHYSSTATE_POLLING;
12607 case PLS_CONFIGPHY:
12608 return IB_PORTPHYSSTATE_TRAINING;
12609 case PLS_LINKUP:
12610 return IB_PORTPHYSSTATE_LINKUP;
12611 case PLS_PHYTEST:
12612 return IB_PORTPHYSSTATE_PHY_TEST;
12613 }
12614}
12615
12616/* return the OPA port logical state name */
12617const char *opa_lstate_name(u32 lstate)
12618{
12619 static const char * const port_logical_names[] = {
12620 "PORT_NOP",
12621 "PORT_DOWN",
12622 "PORT_INIT",
12623 "PORT_ARMED",
12624 "PORT_ACTIVE",
12625 "PORT_ACTIVE_DEFER",
12626 };
12627 if (lstate < ARRAY_SIZE(port_logical_names))
12628 return port_logical_names[lstate];
12629 return "unknown";
12630}
12631
12632/* return the OPA port physical state name */
12633const char *opa_pstate_name(u32 pstate)
12634{
12635 static const char * const port_physical_names[] = {
12636 "PHYS_NOP",
12637 "reserved1",
12638 "PHYS_POLL",
12639 "PHYS_DISABLED",
12640 "PHYS_TRAINING",
12641 "PHYS_LINKUP",
12642 "PHYS_LINK_ERR_RECOVER",
12643 "PHYS_PHY_TEST",
12644 "reserved8",
12645 "PHYS_OFFLINE",
12646 "PHYS_GANGED",
12647 "PHYS_TEST",
12648 };
12649 if (pstate < ARRAY_SIZE(port_physical_names))
12650 return port_physical_names[pstate];
12651 return "unknown";
12652}
12653
12654/*
12655 * Read the hardware link state and set the driver's cached value of it.
12656 * Return the (new) current value.
12657 */
12658u32 get_logical_state(struct hfi1_pportdata *ppd)
12659{
12660 u32 new_state;
12661
12662 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12663 if (new_state != ppd->lstate) {
12664 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012665 opa_lstate_name(new_state), new_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012666 ppd->lstate = new_state;
12667 }
12668 /*
12669 * Set port status flags in the page mapped into userspace
12670 * memory. Do it here to ensure a reliable state - this is
12671 * the only function called by all state handling code.
	12672	 * Always set the flags because the cache value might
	12673	 * have been changed explicitly outside of this
	12674	 * function.
12675 */
12676 if (ppd->statusp) {
12677 switch (ppd->lstate) {
12678 case IB_PORT_DOWN:
12679 case IB_PORT_INIT:
12680 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12681 HFI1_STATUS_IB_READY);
12682 break;
12683 case IB_PORT_ARMED:
12684 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12685 break;
12686 case IB_PORT_ACTIVE:
12687 *ppd->statusp |= HFI1_STATUS_IB_READY;
12688 break;
12689 }
12690 }
12691 return ppd->lstate;
12692}
12693
12694/**
12695 * wait_logical_linkstate - wait for an IB link state change to occur
12696 * @ppd: port device
12697 * @state: the state to wait for
12698 * @msecs: the number of milliseconds to wait
12699 *
	12700	 * Wait up to msecs milliseconds for an IB link state change to occur.
12701 * For now, take the easy polling route.
12702 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12703 */
12704static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12705 int msecs)
12706{
12707 unsigned long timeout;
12708
12709 timeout = jiffies + msecs_to_jiffies(msecs);
12710 while (1) {
12711 if (get_logical_state(ppd) == state)
12712 return 0;
12713 if (time_after(jiffies, timeout))
12714 break;
12715 msleep(20);
12716 }
12717 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12718
12719 return -ETIMEDOUT;
12720}
12721
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012722/*
12723 * Read the physical hardware link state and set the driver's cached value
12724 * of it.
12725 */
12726void cache_physical_state(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012727{
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012728 u32 read_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012729 u32 ib_pstate;
12730
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012731 read_pstate = read_physical_state(ppd->dd);
12732 ib_pstate = chip_to_opa_pstate(ppd->dd, read_pstate);
12733 /* check if OPA pstate changed */
12734 if (chip_to_opa_pstate(ppd->dd, ppd->pstate) != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012735 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012736 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12737 __func__, opa_pstate_name(ib_pstate), ib_pstate,
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012738 read_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012739 }
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012740 ppd->pstate = read_pstate;
12741}
12742
12743/*
	12744	 * wait_physical_linkstate - wait for a physical link state change to occur
12745 * @ppd: port device
12746 * @state: the state to wait for
12747 * @msecs: the number of milliseconds to wait
12748 *
	12749	 * Wait up to msecs milliseconds for a physical link state change to occur.
12750 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12751 */
12752static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12753 int msecs)
12754{
12755 unsigned long timeout;
12756
12757 timeout = jiffies + msecs_to_jiffies(msecs);
12758 while (1) {
12759 cache_physical_state(ppd);
12760 if (ppd->pstate == state)
12761 break;
12762 if (time_after(jiffies, timeout)) {
12763 dd_dev_err(ppd->dd,
12764 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
12765 state, ppd->pstate);
12766 return -ETIMEDOUT;
12767 }
12768 usleep_range(1950, 2050); /* sleep 2ms-ish */
12769 }
12770
12771 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012772}
12773
Mike Marciniszyn77241052015-07-30 15:17:43 -040012774#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12775(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12776
12777#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12778(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12779
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -070012780void hfi1_init_ctxt(struct send_context *sc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012781{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012782 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012783 struct hfi1_devdata *dd = sc->dd;
12784 u64 reg;
12785 u8 set = (sc->type == SC_USER ?
12786 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12787 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12788 reg = read_kctxt_csr(dd, sc->hw_context,
12789 SEND_CTXT_CHECK_ENABLE);
12790 if (set)
12791 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12792 else
12793 SET_STATIC_RATE_CONTROL_SMASK(reg);
12794 write_kctxt_csr(dd, sc->hw_context,
12795 SEND_CTXT_CHECK_ENABLE, reg);
12796 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012797}
12798
12799int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12800{
12801 int ret = 0;
12802 u64 reg;
12803
12804 if (dd->icode != ICODE_RTL_SILICON) {
12805 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12806 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12807 __func__);
12808 return -EINVAL;
12809 }
12810 reg = read_csr(dd, ASIC_STS_THERM);
12811 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12812 ASIC_STS_THERM_CURR_TEMP_MASK);
12813 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12814 ASIC_STS_THERM_LO_TEMP_MASK);
12815 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12816 ASIC_STS_THERM_HI_TEMP_MASK);
12817 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12818 ASIC_STS_THERM_CRIT_TEMP_MASK);
12819 /* triggers is a 3-bit value - 1 bit per trigger. */
12820 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12821
12822 return ret;
12823}
12824
12825/* ========================================================================= */
12826
12827/*
12828 * Enable/disable chip from delivering interrupts.
12829 */
12830void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12831{
12832 int i;
12833
12834 /*
12835 * In HFI, the mask needs to be 1 to allow interrupts.
12836 */
12837 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012838 /* enable all interrupts */
12839 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012840 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012841
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012842 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012843 } else {
12844 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012845 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012846 }
12847}
12848
12849/*
12850 * Clear all interrupt sources on the chip.
12851 */
12852static void clear_all_interrupts(struct hfi1_devdata *dd)
12853{
12854 int i;
12855
12856 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012857 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012858
12859 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12860 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12861 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12862 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12863 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12864 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12865 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12866 for (i = 0; i < dd->chip_send_contexts; i++)
12867 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12868 for (i = 0; i < dd->chip_sdma_engines; i++)
12869 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12870
12871 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12872 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12873 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12874}
12875
12876/* Move to pcie.c? */
12877static void disable_intx(struct pci_dev *pdev)
12878{
12879 pci_intx(pdev, 0);
12880}
12881
12882static void clean_up_interrupts(struct hfi1_devdata *dd)
12883{
12884 int i;
12885
12886 /* remove irqs - must happen before disabling/turning off */
12887 if (dd->num_msix_entries) {
12888 /* MSI-X */
12889 struct hfi1_msix_entry *me = dd->msix_entries;
12890
12891 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012892 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012893 continue;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012894 hfi1_put_irq_affinity(dd, me);
12895 free_irq(me->irq, me->arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012896 }
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012897
12898 /* clean structures */
12899 kfree(dd->msix_entries);
12900 dd->msix_entries = NULL;
12901 dd->num_msix_entries = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012902 } else {
12903 /* INTx */
12904 if (dd->requested_intx_irq) {
12905 free_irq(dd->pcidev->irq, dd);
12906 dd->requested_intx_irq = 0;
12907 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012908 disable_intx(dd->pcidev);
12909 }
12910
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012911 pci_free_irq_vectors(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012912}
12913
12914/*
12915 * Remap the interrupt source from the general handler to the given MSI-X
12916 * interrupt.
12917 */
12918static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12919{
12920 u64 reg;
12921 int m, n;
12922
12923 /* clear from the handled mask of the general interrupt */
12924 m = isrc / 64;
12925 n = isrc % 64;
Dennis Dalessandrobc54f672017-05-29 17:18:14 -070012926 if (likely(m < CCE_NUM_INT_CSRS)) {
12927 dd->gi_mask[m] &= ~((u64)1 << n);
12928 } else {
12929 dd_dev_err(dd, "remap interrupt err\n");
12930 return;
12931 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012932
12933 /* direct the chip source to the given MSI-X interrupt */
12934 m = isrc / 8;
12935 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012936 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12937 reg &= ~((u64)0xff << (8 * n));
12938 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12939 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012940}
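/*
 * Example: for an assumed chip interrupt source isrc = 137, the
 * general-handler mask bit cleared in remap_intr() above is
 * gi_mask[137 / 64] bit 137 % 64, i.e. gi_mask[2] bit 9, and the 8-bit
 * MSI-X vector number is written into byte 137 % 8 = 1 of the CSR at
 * CCE_INT_MAP + 8 * (137 / 8) = CCE_INT_MAP + 136.
 */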
12941
12942static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12943 int engine, int msix_intr)
12944{
12945 /*
	12946	 * SDMA engine interrupt sources are grouped by type, rather than
	12947	 * by engine. Per-engine interrupts are as follows:
12948 * SDMA
12949 * SDMAProgress
12950 * SDMAIdle
12951 */
Jubin John8638b772016-02-14 20:19:24 -080012952 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012953 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012954 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012955 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012956 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012957 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012958}
12959
Mike Marciniszyn77241052015-07-30 15:17:43 -040012960static int request_intx_irq(struct hfi1_devdata *dd)
12961{
12962 int ret;
12963
Jubin John98050712015-11-16 21:59:27 -050012964 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12965 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012966 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012967 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012968 if (ret)
12969 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012970 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012971 else
12972 dd->requested_intx_irq = 1;
12973 return ret;
12974}
12975
12976static int request_msix_irqs(struct hfi1_devdata *dd)
12977{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012978 int first_general, last_general;
12979 int first_sdma, last_sdma;
12980 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012981 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012982
12983 /* calculate the ranges we are going to use */
12984 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012985 last_general = first_general + 1;
12986 first_sdma = last_general;
12987 last_sdma = first_sdma + dd->num_sdma;
12988 first_rx = last_sdma;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070012989 last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
12990
12991 /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
12992 dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012993
12994 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012995 * Sanity check - the code expects all SDMA chip source
12996 * interrupts to be in the same CSR, starting at bit 0. Verify
12997 * that this is true by checking the bit location of the start.
12998 */
12999 BUILD_BUG_ON(IS_SDMA_START % 64);
13000
13001 for (i = 0; i < dd->num_msix_entries; i++) {
13002 struct hfi1_msix_entry *me = &dd->msix_entries[i];
13003 const char *err_info;
13004 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040013005 irq_handler_t thread = NULL;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013006 void *arg = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013007 int idx;
13008 struct hfi1_ctxtdata *rcd = NULL;
13009 struct sdma_engine *sde = NULL;
13010
13011 /* obtain the arguments to request_irq */
13012 if (first_general <= i && i < last_general) {
13013 idx = i - first_general;
13014 handler = general_interrupt;
13015 arg = dd;
13016 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050013017 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013018 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080013019 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013020 } else if (first_sdma <= i && i < last_sdma) {
13021 idx = i - first_sdma;
13022 sde = &dd->per_sdma[idx];
13023 handler = sdma_interrupt;
13024 arg = sde;
13025 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050013026 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013027 err_info = "sdma";
13028 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080013029 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013030 } else if (first_rx <= i && i < last_rx) {
13031 idx = i - first_rx;
13032 rcd = dd->rcd[idx];
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013033 if (rcd) {
13034 /*
13035 * Set the interrupt register and mask for this
13036 * context's interrupt.
13037 */
13038 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13039 rcd->imask = ((u64)1) <<
13040 ((IS_RCVAVAIL_START + idx) % 64);
13041 handler = receive_context_interrupt;
13042 thread = receive_context_thread;
13043 arg = rcd;
13044 snprintf(me->name, sizeof(me->name),
13045 DRIVER_NAME "_%d kctxt%d",
13046 dd->unit, idx);
13047 err_info = "receive context";
13048 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
13049 me->type = IRQ_RCVCTXT;
13050 rcd->msix_intr = i;
13051 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013052 } else {
13053 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080013054 * ignore it
13055 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013056 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013057 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013058 continue;
13059 }
13060 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080013061 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013062 continue;
13063 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080013064 me->name[sizeof(me->name) - 1] = 0;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013065 me->irq = pci_irq_vector(dd->pcidev, i);
13066 /*
13067 * On err return me->irq. Don't need to clear this
13068 * because 'arg' has not been set, and cleanup will
13069 * do the right thing.
13070 */
13071 if (me->irq < 0)
13072 return me->irq;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013073
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013074 ret = request_threaded_irq(me->irq, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080013075 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013076 if (ret) {
13077 dd_dev_err(dd,
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013078 "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13079 err_info, me->irq, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013080 return ret;
13081 }
13082 /*
13083 * assign arg after request_irq call, so it will be
13084 * cleaned up
13085 */
13086 me->arg = arg;
13087
Mitko Haralanov957558c2016-02-03 14:33:40 -080013088 ret = hfi1_get_irq_affinity(dd, me);
13089 if (ret)
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013090 dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013091 }
13092
Mike Marciniszyn77241052015-07-30 15:17:43 -040013093 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013094}
13095
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013096void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13097{
13098 int i;
13099
13100 if (!dd->num_msix_entries) {
13101 synchronize_irq(dd->pcidev->irq);
13102 return;
13103 }
13104
13105 for (i = 0; i < dd->vnic.num_ctxt; i++) {
13106 struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13107 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13108
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013109 synchronize_irq(me->irq);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013110 }
13111}
13112
13113void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13114{
13115 struct hfi1_devdata *dd = rcd->dd;
13116 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13117
13118 if (!me->arg) /* => no irq, no affinity */
13119 return;
13120
13121 hfi1_put_irq_affinity(dd, me);
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013122 free_irq(me->irq, me->arg);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013123
13124 me->arg = NULL;
13125}
13126
13127void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13128{
13129 struct hfi1_devdata *dd = rcd->dd;
13130 struct hfi1_msix_entry *me;
13131 int idx = rcd->ctxt;
13132 void *arg = rcd;
13133 int ret;
13134
13135 rcd->msix_intr = dd->vnic.msix_idx++;
13136 me = &dd->msix_entries[rcd->msix_intr];
13137
13138 /*
13139 * Set the interrupt register and mask for this
13140 * context's interrupt.
13141 */
13142 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13143 rcd->imask = ((u64)1) <<
13144 ((IS_RCVAVAIL_START + idx) % 64);
13145
13146 snprintf(me->name, sizeof(me->name),
13147 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13148 me->name[sizeof(me->name) - 1] = 0;
13149 me->type = IRQ_RCVCTXT;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013150 me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13151 if (me->irq < 0) {
13152 dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
13153 idx, me->irq);
13154 return;
13155 }
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013156 remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13157
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013158 ret = request_threaded_irq(me->irq, receive_context_interrupt,
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013159 receive_context_thread, 0, me->name, arg);
13160 if (ret) {
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013161 dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13162 me->irq, idx, ret);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013163 return;
13164 }
13165 /*
13166 * assign arg after request_irq call, so it will be
13167 * cleaned up
13168 */
13169 me->arg = arg;
13170
13171 ret = hfi1_get_irq_affinity(dd, me);
13172 if (ret) {
13173 dd_dev_err(dd,
13174 "unable to pin IRQ %d\n", ret);
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013175 free_irq(me->irq, me->arg);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013176 }
13177}
13178
Mike Marciniszyn77241052015-07-30 15:17:43 -040013179/*
13180 * Set the general handler to accept all interrupts, remap all
13181 * chip interrupts back to MSI-X 0.
13182 */
13183static void reset_interrupts(struct hfi1_devdata *dd)
13184{
13185 int i;
13186
13187 /* all interrupts handled by the general handler */
13188 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13189 dd->gi_mask[i] = ~(u64)0;
13190
13191 /* all chip interrupts map to MSI-X 0 */
13192 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013193 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013194}
13195
13196static int set_up_interrupts(struct hfi1_devdata *dd)
13197{
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013198 u32 total;
13199 int ret, request;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013200 int single_interrupt = 0; /* we expect to have all the interrupts */
13201
13202 /*
13203 * Interrupt count:
13204 * 1 general, "slow path" interrupt (includes the SDMA engines
13205 * slow source, SDMACleanupDone)
13206 * N interrupts - one per used SDMA engine
	13207	 * M interrupts - one per kernel receive context
13208 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013209 total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
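	/*
	 * Example (hypothetical configuration): 16 SDMA engines, 8 kernel
	 * receive queues, and an HFI1_NUM_VNIC_CTXT of 8 would make
	 * total = 1 + 16 + 8 + 8 = 33 requested MSI-X vectors.
	 */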
Mike Marciniszyn77241052015-07-30 15:17:43 -040013210
Mike Marciniszyn77241052015-07-30 15:17:43 -040013211 /* ask for MSI-X interrupts */
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013212 request = request_msix(dd, total);
13213 if (request < 0) {
13214 ret = request;
13215 goto fail;
13216 } else if (request == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013217 /* using INTx */
13218 /* dd->num_msix_entries already zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013219 single_interrupt = 1;
13220 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013221 } else if (request < total) {
13222 /* using MSI-X, with reduced interrupts */
13223 dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
13224 total, request);
13225 ret = -EINVAL;
13226 goto fail;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013227 } else {
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013228 dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13229 GFP_KERNEL);
13230 if (!dd->msix_entries) {
13231 ret = -ENOMEM;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013232 goto fail;
13233 }
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013234 /* using MSI-X */
13235 dd->num_msix_entries = total;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013236 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13237 }
13238
13239 /* mask all interrupts */
13240 set_intr_state(dd, 0);
13241 /* clear all pending interrupts */
13242 clear_all_interrupts(dd);
13243
13244 /* reset general handler mask, chip MSI-X mappings */
13245 reset_interrupts(dd);
13246
13247 if (single_interrupt)
13248 ret = request_intx_irq(dd);
13249 else
13250 ret = request_msix_irqs(dd);
13251 if (ret)
13252 goto fail;
13253
13254 return 0;
13255
13256fail:
13257 clean_up_interrupts(dd);
13258 return ret;
13259}
13260
13261/*
13262 * Set up context values in dd. Sets:
13263 *
13264 * num_rcv_contexts - number of contexts being used
13265 * n_krcv_queues - number of kernel contexts
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013266 * first_dyn_alloc_ctxt - first dynamically allocated context
13267 * in array of contexts
Mike Marciniszyn77241052015-07-30 15:17:43 -040013268 * freectxts - number of free user contexts
13269 * num_send_contexts - number of PIO send contexts being used
13270 */
13271static int set_up_context_variables(struct hfi1_devdata *dd)
13272{
Harish Chegondi429b6a72016-08-31 07:24:40 -070013273 unsigned long num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013274 int total_contexts;
13275 int ret;
13276 unsigned ngroups;
Dean Luick8f000f72016-04-12 11:32:06 -070013277 int qos_rmt_count;
13278 int user_rmt_reduced;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013279
13280 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070013281 * Kernel receive contexts:
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013282 * - Context 0 - control context (VL15/multicast/error)
Dean Luick33a9eb52016-04-12 10:50:22 -070013283 * - Context 1 - first kernel context
13284 * - Context 2 - second kernel context
13285 * ...
Mike Marciniszyn77241052015-07-30 15:17:43 -040013286 */
13287 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013288 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070013289 * n_krcvqs is the sum of module parameter kernel receive
13290 * contexts, krcvqs[]. It does not include the control
13291 * context, so add that.
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013292 */
Dean Luick33a9eb52016-04-12 10:50:22 -070013293 num_kernel_contexts = n_krcvqs + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013294 else
Harish Chegondi8784ac02016-07-25 13:38:50 -070013295 num_kernel_contexts = DEFAULT_KRCVQS + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013296 /*
	13297	 * Every kernel receive context needs an ACK send context:
	13298	 * one send context is allocated for each VL{0-7} and VL15.
13299 */
13300 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13301 dd_dev_err(dd,
Harish Chegondi429b6a72016-08-31 07:24:40 -070013302 "Reducing # kernel rcv contexts to: %d, from %lu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013303 (int)(dd->chip_send_contexts - num_vls - 1),
Harish Chegondi429b6a72016-08-31 07:24:40 -070013304 num_kernel_contexts);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013305 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13306 }
13307 /*
Jubin John0852d242016-04-12 11:30:08 -070013308 * User contexts:
13309 * - default to 1 user context per real (non-HT) CPU core if
13310 * num_user_contexts is negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040013311 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050013312 if (num_user_contexts < 0)
Jubin John0852d242016-04-12 11:30:08 -070013313 num_user_contexts =
Dennis Dalessandro41973442016-07-25 07:52:36 -070013314 cpumask_weight(&node_affinity.real_cpu_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013315
13316 total_contexts = num_kernel_contexts + num_user_contexts;
13317
13318 /*
13319 * Adjust the counts given a global max.
13320 */
13321 if (total_contexts > dd->chip_rcv_contexts) {
13322 dd_dev_err(dd,
13323 "Reducing # user receive contexts to: %d, from %d\n",
13324 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
13325 (int)num_user_contexts);
13326 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
13327 /* recalculate */
13328 total_contexts = num_kernel_contexts + num_user_contexts;
13329 }
13330
Dean Luick8f000f72016-04-12 11:32:06 -070013331 /* each user context requires an entry in the RMT */
13332 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13333 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13334 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13335 dd_dev_err(dd,
13336 "RMT size is reducing the number of user receive contexts from %d to %d\n",
13337 (int)num_user_contexts,
13338 user_rmt_reduced);
13339 /* recalculate */
13340 num_user_contexts = user_rmt_reduced;
13341 total_contexts = num_kernel_contexts + num_user_contexts;
13342 }
13343
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013344 /* Accommodate VNIC contexts */
13345 if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
13346 total_contexts += HFI1_NUM_VNIC_CTXT;
13347
13348 /* the first N are kernel contexts, the rest are user/vnic contexts */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013349 dd->num_rcv_contexts = total_contexts;
13350 dd->n_krcv_queues = num_kernel_contexts;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013351 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080013352 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013353 dd->freectxts = num_user_contexts;
13354 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013355 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
13356 (int)dd->chip_rcv_contexts,
13357 (int)dd->num_rcv_contexts,
13358 (int)dd->n_krcv_queues,
13359 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013360
13361 /*
13362 * Receive array allocation:
13363 * All RcvArray entries are divided into groups of 8. This
13364 * is required by the hardware and will speed up writes to
13365 * consecutive entries by using write-combining of the entire
13366 * cacheline.
13367 *
	13368	 * The groups are evenly divided among all contexts; any
	13369	 * leftover groups are given to the first N user
	13370	 * contexts.
13371 */
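	/*
	 * Worked example (hypothetical sizes): 8192 RcvArray entries in
	 * groups of 8 give 1024 groups; with 40 receive contexts each
	 * context gets 1024 / 40 = 25 groups and the remaining
	 * 1024 - 40 * 25 = 24 groups go to the first 24 user contexts
	 * (nctxt_extra = 24).
	 */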
13372 dd->rcv_entries.group_size = RCV_INCREMENT;
13373 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13374 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13375 dd->rcv_entries.nctxt_extra = ngroups -
13376 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13377 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13378 dd->rcv_entries.ngroups,
13379 dd->rcv_entries.nctxt_extra);
13380 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13381 MAX_EAGER_ENTRIES * 2) {
13382 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13383 dd->rcv_entries.group_size;
13384 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013385 "RcvArray group count too high, change to %u\n",
13386 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013387 dd->rcv_entries.nctxt_extra = 0;
13388 }
13389 /*
13390 * PIO send contexts
13391 */
13392 ret = init_sc_pools_and_sizes(dd);
13393 if (ret >= 0) { /* success */
13394 dd->num_send_contexts = ret;
13395 dd_dev_info(
13396 dd,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013397 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013398 dd->chip_send_contexts,
13399 dd->num_send_contexts,
13400 dd->sc_sizes[SC_KERNEL].count,
13401 dd->sc_sizes[SC_ACK].count,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013402 dd->sc_sizes[SC_USER].count,
13403 dd->sc_sizes[SC_VL15].count);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013404 ret = 0; /* success */
13405 }
13406
13407 return ret;
13408}
13409
13410/*
13411 * Set the device/port partition key table. The MAD code
13412 * will ensure that, at least, the partial management
13413 * partition key is present in the table.
13414 */
13415static void set_partition_keys(struct hfi1_pportdata *ppd)
13416{
13417 struct hfi1_devdata *dd = ppd->dd;
13418 u64 reg = 0;
13419 int i;
13420
13421 dd_dev_info(dd, "Setting partition keys\n");
13422 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13423 reg |= (ppd->pkeys[i] &
13424 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13425 ((i % 4) *
13426 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13427 /* Each register holds 4 PKey values. */
13428 if ((i % 4) == 3) {
13429 write_csr(dd, RCV_PARTITION_KEY +
13430 ((i - 3) * 2), reg);
13431 reg = 0;
13432 }
13433 }
13434
13435 /* Always enable HW pkeys check when pkeys table is set */
13436 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13437}
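/*
 * Example: assuming the usual 16-bit partition key fields, pkeys[0..3]
 * are packed into the 64-bit CSR at RCV_PARTITION_KEY + 0 (written on
 * the i == 3 pass above) and pkeys[4..7] into RCV_PARTITION_KEY + 8
 * (written on the i == 7 pass), one register per four table entries.
 */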
13438
13439/*
13440 * These CSRs and memories are uninitialized on reset and must be
13441 * written before reading to set the ECC/parity bits.
13442 *
13443 * NOTE: All user context CSRs that are not mmapped write-only
13444 * (e.g. the TID flows) must be initialized even if the driver never
13445 * reads them.
13446 */
13447static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13448{
13449 int i, j;
13450
13451 /* CceIntMap */
13452 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013453 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013454
13455 /* SendCtxtCreditReturnAddr */
13456 for (i = 0; i < dd->chip_send_contexts; i++)
13457 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13458
13459 /* PIO Send buffers */
13460 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080013461 /*
13462 * These are not normally read, and (presently) have no method
13463 * to be read, so are not pre-initialized
13464 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013465
13466 /* RcvHdrAddr */
13467 /* RcvHdrTailAddr */
13468 /* RcvTidFlowTable */
13469 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13470 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13471 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13472 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080013473 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013474 }
13475
13476 /* RcvArray */
13477 for (i = 0; i < dd->chip_rcv_array_count; i++)
Mike Marciniszyncb51c5d2017-07-24 07:45:31 -070013478 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013479
13480 /* RcvQPMapTable */
13481 for (i = 0; i < 32; i++)
13482 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13483}
13484
13485/*
13486 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13487 */
13488static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13489 u64 ctrl_bits)
13490{
13491 unsigned long timeout;
13492 u64 reg;
13493
13494 /* is the condition present? */
13495 reg = read_csr(dd, CCE_STATUS);
13496 if ((reg & status_bits) == 0)
13497 return;
13498
13499 /* clear the condition */
13500 write_csr(dd, CCE_CTRL, ctrl_bits);
13501
13502 /* wait for the condition to clear */
13503 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13504 while (1) {
13505 reg = read_csr(dd, CCE_STATUS);
13506 if ((reg & status_bits) == 0)
13507 return;
13508 if (time_after(jiffies, timeout)) {
13509 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013510 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13511 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013512 return;
13513 }
13514 udelay(1);
13515 }
13516}
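/*
 * Usage note: clear_cce_status() is the helper behind the unfreeze/resume
 * sequence in reset_cce_csrs() below, e.g.
 *	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
 * It polls CceStatus for up to CCE_STATUS_TIMEOUT ms before giving up.
 */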
13517
13518/* set CCE CSRs to chip reset defaults */
13519static void reset_cce_csrs(struct hfi1_devdata *dd)
13520{
13521 int i;
13522
13523 /* CCE_REVISION read-only */
13524 /* CCE_REVISION2 read-only */
13525 /* CCE_CTRL - bits clear automatically */
13526 /* CCE_STATUS read-only, use CceCtrl to clear */
13527 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13528 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13529 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13530 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13531 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13532 /* CCE_ERR_STATUS read-only */
13533 write_csr(dd, CCE_ERR_MASK, 0);
13534 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13535 /* CCE_ERR_FORCE leave alone */
13536 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13537 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13538 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13539 /* CCE_PCIE_CTRL leave alone */
13540 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13541 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13542 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013543 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013544 }
13545 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13546 /* CCE_MSIX_PBA read-only */
13547 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13548 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13549 }
13550 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13551 write_csr(dd, CCE_INT_MAP, 0);
13552 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13553 /* CCE_INT_STATUS read-only */
13554 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13555 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13556 /* CCE_INT_FORCE leave alone */
13557 /* CCE_INT_BLOCKED read-only */
13558 }
13559 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13560 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13561}
13562
Mike Marciniszyn77241052015-07-30 15:17:43 -040013563/* set MISC CSRs to chip reset defaults */
13564static void reset_misc_csrs(struct hfi1_devdata *dd)
13565{
13566 int i;
13567
13568 for (i = 0; i < 32; i++) {
13569 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13570 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13571 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13572 }
Jubin John4d114fd2016-02-14 20:21:43 -080013573 /*
13574 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13575 * only be written in 128-byte chunks
13576 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013577 /* init RSA engine to clear lingering errors */
13578 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13579 write_csr(dd, MISC_CFG_RSA_MU, 0);
13580 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13581 /* MISC_STS_8051_DIGEST read-only */
13582 /* MISC_STS_SBM_DIGEST read-only */
13583 /* MISC_STS_PCIE_DIGEST read-only */
13584 /* MISC_STS_FAB_DIGEST read-only */
13585 /* MISC_ERR_STATUS read-only */
13586 write_csr(dd, MISC_ERR_MASK, 0);
13587 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13588 /* MISC_ERR_FORCE leave alone */
13589}
13590
13591/* set TXE CSRs to chip reset defaults */
13592static void reset_txe_csrs(struct hfi1_devdata *dd)
13593{
13594 int i;
13595
13596 /*
13597 * TXE Kernel CSRs
13598 */
13599 write_csr(dd, SEND_CTRL, 0);
13600 __cm_reset(dd, 0); /* reset CM internal state */
13601 /* SEND_CONTEXTS read-only */
13602 /* SEND_DMA_ENGINES read-only */
13603 /* SEND_PIO_MEM_SIZE read-only */
13604 /* SEND_DMA_MEM_SIZE read-only */
13605 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13606 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13607 /* SEND_PIO_ERR_STATUS read-only */
13608 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13609 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13610 /* SEND_PIO_ERR_FORCE leave alone */
13611 /* SEND_DMA_ERR_STATUS read-only */
13612 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13613 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13614 /* SEND_DMA_ERR_FORCE leave alone */
13615 /* SEND_EGRESS_ERR_STATUS read-only */
13616 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13617 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13618 /* SEND_EGRESS_ERR_FORCE leave alone */
13619 write_csr(dd, SEND_BTH_QP, 0);
13620 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13621 write_csr(dd, SEND_SC2VLT0, 0);
13622 write_csr(dd, SEND_SC2VLT1, 0);
13623 write_csr(dd, SEND_SC2VLT2, 0);
13624 write_csr(dd, SEND_SC2VLT3, 0);
13625 write_csr(dd, SEND_LEN_CHECK0, 0);
13626 write_csr(dd, SEND_LEN_CHECK1, 0);
13627 /* SEND_ERR_STATUS read-only */
13628 write_csr(dd, SEND_ERR_MASK, 0);
13629 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13630 /* SEND_ERR_FORCE read-only */
13631 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013632 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013633 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013634 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13635 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13636 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013637 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013638 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013639 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013640 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013641 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013642 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013643 /* SEND_CM_CREDIT_USED_STATUS read-only */
13644 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13645 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13646 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13647 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13648 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13649 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013650 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013651 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13652 /* SEND_CM_CREDIT_USED_VL read-only */
13653 /* SEND_CM_CREDIT_USED_VL15 read-only */
13654 /* SEND_EGRESS_CTXT_STATUS read-only */
13655 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13656 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13657 /* SEND_EGRESS_ERR_INFO read-only */
13658 /* SEND_EGRESS_ERR_SOURCE read-only */
13659
13660 /*
13661 * TXE Per-Context CSRs
13662 */
13663 for (i = 0; i < dd->chip_send_contexts; i++) {
13664 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13665 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13666 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13667 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13668 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13669 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13670 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13671 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13672 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13673 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13674 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13675 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13676 }
13677
13678 /*
13679 * TXE Per-SDMA CSRs
13680 */
13681 for (i = 0; i < dd->chip_sdma_engines; i++) {
13682 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13683 /* SEND_DMA_STATUS read-only */
13684 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13685 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13686 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13687 /* SEND_DMA_HEAD read-only */
13688 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13689 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13690 /* SEND_DMA_IDLE_CNT read-only */
13691 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13692 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13693 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13694 /* SEND_DMA_ENG_ERR_STATUS read-only */
13695 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13696 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13697 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13698 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13699 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13700 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13701 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13702 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13703 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13704 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13705 }
13706}
13707
13708/*
13709 * Expect on entry:
13710 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13711 */
13712static void init_rbufs(struct hfi1_devdata *dd)
13713{
13714 u64 reg;
13715 int count;
13716
13717 /*
13718 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13719 * clear.
13720 */
13721 count = 0;
13722 while (1) {
13723 reg = read_csr(dd, RCV_STATUS);
13724 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13725 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13726 break;
13727 /*
13728 * Give up after 1ms - maximum wait time.
13729 *
Harish Chegondie8a70af2016-09-25 07:42:01 -070013730 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
Mike Marciniszyn77241052015-07-30 15:17:43 -040013731 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
Harish Chegondie8a70af2016-09-25 07:42:01 -070013732 * 136 KiB / (66% * 250 MB/s) = 844us
Mike Marciniszyn77241052015-07-30 15:17:43 -040013733 */
13734 if (count++ > 500) {
13735 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013736 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13737 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013738 break;
13739 }
13740 udelay(2); /* do not busy-wait the CSR */
13741 }
13742
13743 /* start the init - expect RcvCtrl to be 0 */
13744 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13745
13746 /*
13747 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13748 * period after the write before RcvStatus.RxRbufInitDone is valid.
13749 * The delay in the first run through the loop below is sufficient and
13750 * required before the first read of RcvStatus.RxRbufInintDone.
13751 */
13752 read_csr(dd, RCV_CTRL);
13753
13754 /* wait for the init to finish */
13755 count = 0;
13756 while (1) {
13757 /* delay is required first time through - see above */
13758 udelay(2); /* do not busy-wait the CSR */
13759 reg = read_csr(dd, RCV_STATUS);
13760 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13761 break;
13762
13763 /* give up after 100us - slowest possible at 33MHz is 73us */
13764 if (count++ > 50) {
13765 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013766 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13767 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013768 break;
13769 }
13770 }
13771}
13772
13773/* set RXE CSRs to chip reset defaults */
13774static void reset_rxe_csrs(struct hfi1_devdata *dd)
13775{
13776 int i, j;
13777
13778 /*
13779 * RXE Kernel CSRs
13780 */
13781 write_csr(dd, RCV_CTRL, 0);
13782 init_rbufs(dd);
13783 /* RCV_STATUS read-only */
13784 /* RCV_CONTEXTS read-only */
13785 /* RCV_ARRAY_CNT read-only */
13786 /* RCV_BUF_SIZE read-only */
13787 write_csr(dd, RCV_BTH_QP, 0);
13788 write_csr(dd, RCV_MULTICAST, 0);
13789 write_csr(dd, RCV_BYPASS, 0);
13790 write_csr(dd, RCV_VL15, 0);
13791 /* this is a clear-down */
13792 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013793 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013794 /* RCV_ERR_STATUS read-only */
13795 write_csr(dd, RCV_ERR_MASK, 0);
13796 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13797 /* RCV_ERR_FORCE leave alone */
13798 for (i = 0; i < 32; i++)
13799 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13800 for (i = 0; i < 4; i++)
13801 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13802 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13803 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13804 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13805 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013806 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13807 clear_rsm_rule(dd, i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013808 for (i = 0; i < 32; i++)
13809 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13810
13811 /*
13812 * RXE Kernel and User Per-Context CSRs
13813 */
13814 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13815 /* kernel */
13816 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13817 /* RCV_CTXT_STATUS read-only */
13818 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13819 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13820 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13821 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13822 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13823 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13824 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13825 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13826 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13827 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13828
13829 /* user */
13830 /* RCV_HDR_TAIL read-only */
13831 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13832 /* RCV_EGR_INDEX_TAIL read-only */
13833 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13834 /* RCV_EGR_OFFSET_TAIL read-only */
13835 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013836 write_uctxt_csr(dd, i,
13837 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013838 }
13839 }
13840}
13841
13842/*
13843 * Set sc2vl tables.
13844 *
13845 * They power on to zeros, so to avoid send context errors
13846 * they need to be set:
13847 *
13848 * SC 0-7 -> VL 0-7 (respectively)
13849 * SC 15 -> VL 15
13850 * otherwise
13851 * -> VL 0
13852 */
13853static void init_sc2vl_tables(struct hfi1_devdata *dd)
13854{
13855 int i;
13856 /* init per architecture spec, constrained by hardware capability */
13857
13858 /* HFI maps sent packets */
13859 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13860 0,
13861 0, 0, 1, 1,
13862 2, 2, 3, 3,
13863 4, 4, 5, 5,
13864 6, 6, 7, 7));
13865 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13866 1,
13867 8, 0, 9, 0,
13868 10, 0, 11, 0,
13869 12, 0, 13, 0,
13870 14, 0, 15, 15));
13871 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13872 2,
13873 16, 0, 17, 0,
13874 18, 0, 19, 0,
13875 20, 0, 21, 0,
13876 22, 0, 23, 0));
13877 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13878 3,
13879 24, 0, 25, 0,
13880 26, 0, 27, 0,
13881 28, 0, 29, 0,
13882 30, 0, 31, 0));
13883
13884 /* DC maps received packets */
13885 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13886 15_0,
13887 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13888 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13889 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13890 31_16,
13891 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13892 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13893
13894 /* initialize the cached sc2vl values consistently with h/w */
13895 for (i = 0; i < 32; i++) {
13896 if (i < 8 || i == 15)
13897 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13898 else
13899 *((u8 *)(dd->sc2vl) + i) = 0;
13900 }
13901}
13902
13903/*
13904 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13905 * depend on the chip going through a power-on reset - a driver may be loaded
13906 * and unloaded many times.
13907 *
13908 * Do not write any CSR values to the chip in this routine - there may be
13909 * a reset following the (possible) FLR in this routine.
13910 *
13911 */
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070013912static int init_chip(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013913{
13914 int i;
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070013915 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013916
13917 /*
13918 * Put the HFI CSRs in a known state.
13919 * Combine this with a DC reset.
13920 *
13921 * Stop the device from doing anything while we do a
13922 * reset. We know there are no other active users of
13923 * the device since we are now in charge. Turn off
13924 * all outbound and inbound traffic and make sure
13925 * the device does not generate any interrupts.
13926 */
13927
13928 /* disable send contexts and SDMA engines */
13929 write_csr(dd, SEND_CTRL, 0);
13930 for (i = 0; i < dd->chip_send_contexts; i++)
13931 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13932 for (i = 0; i < dd->chip_sdma_engines; i++)
13933 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13934 /* disable port (turn off RXE inbound traffic) and contexts */
13935 write_csr(dd, RCV_CTRL, 0);
13936 for (i = 0; i < dd->chip_rcv_contexts; i++)
13937 write_csr(dd, RCV_CTXT_CTRL, 0);
13938 /* mask all interrupt sources */
13939 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013940 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013941
13942 /*
13943 * DC Reset: do a full DC reset before the register clear.
13944 * A recommended length of time to hold is one CSR read,
13945 * so reread the CceDcCtrl. Then, hold the DC in reset
13946 * across the clear.
13947 */
13948 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013949 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013950
13951 if (use_flr) {
13952 /*
13953 * A FLR will reset the SPC core and part of the PCIe.
13954 * The parts that need to be restored have already been
13955 * saved.
13956 */
13957 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13958
13959 /* do the FLR, the DC reset will remain */
Christoph Hellwig21c433a2017-04-25 14:36:19 -050013960 pcie_flr(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013961
13962 /* restore command and BARs */
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070013963 ret = restore_pci_variables(dd);
13964 if (ret) {
13965 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13966 __func__);
13967 return ret;
13968 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013969
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013970 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013971 dd_dev_info(dd, "Resetting CSRs with FLR\n");
Christoph Hellwig21c433a2017-04-25 14:36:19 -050013972 pcie_flr(dd->pcidev);
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070013973 ret = restore_pci_variables(dd);
13974 if (ret) {
13975 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13976 __func__);
13977 return ret;
13978 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013979 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013980 } else {
13981 dd_dev_info(dd, "Resetting CSRs with writes\n");
13982 reset_cce_csrs(dd);
13983 reset_txe_csrs(dd);
13984 reset_rxe_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013985 reset_misc_csrs(dd);
13986 }
13987 /* clear the DC reset */
13988 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013989
Mike Marciniszyn77241052015-07-30 15:17:43 -040013990 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013991 setextled(dd, 0);
13992
Mike Marciniszyn77241052015-07-30 15:17:43 -040013993 /*
13994 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013995 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013996 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013997 * holds anything plugged in constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013998 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013999 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014000 * I2CCLK and I2CDAT will change per direction, and INT_N and
14001 * MODPRS_N are input only and their value is ignored.
14002 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050014003 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14004 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014005 init_chip_resources(dd);
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070014006 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014007}
14008
14009static void init_early_variables(struct hfi1_devdata *dd)
14010{
14011 int i;
14012
14013 /* assign link credit variables */
14014 dd->vau = CM_VAU;
14015 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014016 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040014017 dd->link_credits--;
14018 dd->vcu = cu_to_vcu(hfi1_cu);
14019 /* enough room for 8 MAD packets plus header - 17K */
14020 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14021 if (dd->vl15_init > dd->link_credits)
14022 dd->vl15_init = dd->link_credits;
14023
14024 write_uninitialized_csrs_and_memories(dd);
14025
14026 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14027 for (i = 0; i < dd->num_pports; i++) {
14028 struct hfi1_pportdata *ppd = &dd->pport[i];
14029
14030 set_partition_keys(ppd);
14031 }
14032 init_sc2vl_tables(dd);
14033}
14034
14035static void init_kdeth_qp(struct hfi1_devdata *dd)
14036{
14037 /* user changed the KDETH_QP */
14038 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14039 /* out of range or illegal value */
14040 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14041 kdeth_qp = 0;
14042 }
14043 if (kdeth_qp == 0) /* not set, or failed range check */
14044 kdeth_qp = DEFAULT_KDETH_QP;
14045
14046 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080014047 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14048 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014049
14050 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080014051 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14052 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014053}
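/*
 * Range note for init_kdeth_qp() (a sketch of the checks above): the
 * kdeth_qp module parameter is treated as an 8-bit prefix; a value of 0 or
 * of 0xff and above falls back to DEFAULT_KDETH_QP.  The accepted prefix is
 * written into both SendBthQP and RcvBthQP so the send and receive sides
 * agree on which QPs are KDETH QPs.
 */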
14054
14055/**
14056 * init_qpmap_table
14057 * @dd - device data
14058 * @first_ctxt - first context
14059 * @last_ctxt - last context
14060 *
14061 * This routine sets the qpn mapping table that
14062 * is indexed by qpn[8:1].
14063 *
14064 * The routine will round robin the 256 settings
14065 * from first_ctxt to last_ctxt.
14066 *
14067 * The first/last looks ahead to having specialized
14068 * receive contexts for mgmt and bypass. Normal
14069 * verbs traffic will assumed to be on a range
14070 * of receive contexts.
14071 */
14072static void init_qpmap_table(struct hfi1_devdata *dd,
14073 u32 first_ctxt,
14074 u32 last_ctxt)
14075{
14076 u64 reg = 0;
14077 u64 regno = RCV_QP_MAP_TABLE;
14078 int i;
14079 u64 ctxt = first_ctxt;
14080
Dean Luick60d585ad2016-04-12 10:50:35 -070014081 for (i = 0; i < 256; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014082 reg |= ctxt << (8 * (i % 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014083 ctxt++;
14084 if (ctxt > last_ctxt)
14085 ctxt = first_ctxt;
Dean Luick60d585ad2016-04-12 10:50:35 -070014086 if (i % 8 == 7) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014087 write_csr(dd, regno, reg);
14088 reg = 0;
14089 regno += 8;
14090 }
14091 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040014092
14093 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14094 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14095}
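/*
 * Mapping sketch for init_qpmap_table() (illustrative values): with
 * first_ctxt == 2 and last_ctxt == 4, the 256 table slots are written
 * 2, 3, 4, 2, 3, 4, ... so a QP whose qpn[8:1] equals 0, 1, 2, 3, ... is
 * steered to receive context 2, 3, 4, 2, ... respectively.  Eight one-byte
 * entries are packed into each 64-bit RcvQpMapTable CSR write.
 */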
14096
Dean Luick372cc85a2016-04-12 11:30:51 -070014097struct rsm_map_table {
14098 u64 map[NUM_MAP_REGS];
14099 unsigned int used;
14100};
14101
Dean Luickb12349a2016-04-12 11:31:33 -070014102struct rsm_rule_data {
14103 u8 offset;
14104 u8 pkt_type;
14105 u32 field1_off;
14106 u32 field2_off;
14107 u32 index1_off;
14108 u32 index1_width;
14109 u32 index2_off;
14110 u32 index2_width;
14111 u32 mask1;
14112 u32 value1;
14113 u32 mask2;
14114 u32 value2;
14115};
14116
Dean Luick372cc85a2016-04-12 11:30:51 -070014117/*
14118 * Return an initialized RMT map table for users to fill in. OK if it
14119 * returns NULL, indicating no table.
14120 */
14121static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14122{
14123 struct rsm_map_table *rmt;
14124 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
14125
14126 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14127 if (rmt) {
14128 memset(rmt->map, rxcontext, sizeof(rmt->map));
14129 rmt->used = 0;
14130 }
14131
14132 return rmt;
14133}
14134
14135/*
14136 * Write the final RMT map table to the chip. OK if the table is NULL.
14137 * The caller retains ownership of the table and frees it.
14138 */
14139static void complete_rsm_map_table(struct hfi1_devdata *dd,
14140 struct rsm_map_table *rmt)
14141{
14142 int i;
14143
14144 if (rmt) {
14145 /* write table to chip */
14146 for (i = 0; i < NUM_MAP_REGS; i++)
14147 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14148
14149 /* enable RSM */
14150 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14151 }
14152}
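/*
 * Typical use of the RMT helpers (see init_rxe() below for the actual
 * sequence): allocate the table, let the interested parties claim entries
 * by advancing rmt->used, then write it to the chip and free it:
 *
 *	rmt = alloc_rsm_map_table(dd);
 *	init_qos(dd, rmt);
 *	init_user_fecn_handling(dd, rmt);
 *	complete_rsm_map_table(dd, rmt);
 *	kfree(rmt);
 */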
14153
Dean Luickb12349a2016-04-12 11:31:33 -070014154/*
14155 * Add a receive side mapping rule.
14156 */
14157static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14158 struct rsm_rule_data *rrd)
14159{
14160 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14161 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14162 1ull << rule_index | /* enable bit */
14163 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14164 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14165 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14166 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14167 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14168 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14169 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14170 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14171 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14172 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14173 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14174 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14175 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14176}
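/*
 * Rule indices used in this file: RSM_INS_VERBS (QOS, added in init_qos()),
 * RSM_INS_FECN (expected-receive FECN intercept, added in
 * init_user_fecn_handling()) and RSM_INS_VNIC (16B VNIC steering, added in
 * hfi1_init_vnic_rsm()).
 */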
14177
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014178/*
14179 * Clear a receive side mapping rule.
14180 */
14181static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14182{
14183 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14184 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14185 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14186}
14187
Dean Luick4a818be2016-04-12 11:31:11 -070014188/* return the number of RSM map table entries that will be used for QOS */
14189static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14190 unsigned int *np)
14191{
14192 int i;
14193 unsigned int m, n;
14194 u8 max_by_vl = 0;
14195
14196 /* is QOS active at all? */
14197 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14198 num_vls == 1 ||
14199 krcvqsset <= 1)
14200 goto no_qos;
14201
14202 /* determine bits for qpn */
14203 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14204 if (krcvqs[i] > max_by_vl)
14205 max_by_vl = krcvqs[i];
14206 if (max_by_vl > 32)
14207 goto no_qos;
14208 m = ilog2(__roundup_pow_of_two(max_by_vl));
14209
14210 /* determine bits for vl */
14211 n = ilog2(__roundup_pow_of_two(num_vls));
14212
14213 /* reject if too much is used */
14214 if ((m + n) > 7)
14215 goto no_qos;
14216
14217 if (mp)
14218 *mp = m;
14219 if (np)
14220 *np = n;
14221
14222 return 1 << (m + n);
14223
14224no_qos:
14225 if (mp)
14226 *mp = 0;
14227 if (np)
14228 *np = 0;
14229 return 0;
14230}
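/*
 * Worked example for qos_rmt_entries() (hypothetical module parameters):
 * krcvqs = 3,3,2 over num_vls == 3 gives max_by_vl == 3, so
 * m == ilog2(__roundup_pow_of_two(3)) == 2 qpn bits and
 * n == ilog2(__roundup_pow_of_two(3)) == 2 vl bits.  Since m + n == 4 <= 7,
 * the routine reports 1 << 4 == 16 RSM map table entries for QOS.
 */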
14231
Mike Marciniszyn77241052015-07-30 15:17:43 -040014232/**
14233 * init_qos - init RX qos
14234 * @dd - device data
Dean Luick372cc85a2016-04-12 11:30:51 -070014235 * @rmt - RSM map table
Mike Marciniszyn77241052015-07-30 15:17:43 -040014236 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014237 * This routine initializes Rule 0 and the RSM map table to implement
14238 * quality of service (qos).
Mike Marciniszyn77241052015-07-30 15:17:43 -040014239 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014240 * If all of the limit tests succeed, qos is applied based on the array
14241 * interpretation of krcvqs where entry 0 is VL0.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014242 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014243 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14244 * feed both the RSM map table and the single rule.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014245 */
Dean Luick372cc85a2016-04-12 11:30:51 -070014246static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014247{
Dean Luickb12349a2016-04-12 11:31:33 -070014248 struct rsm_rule_data rrd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014249 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
Dean Luick372cc85a2016-04-12 11:30:51 -070014250 unsigned int rmt_entries;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014251 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014252
Dean Luick4a818be2016-04-12 11:31:11 -070014253 if (!rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014254 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014255 rmt_entries = qos_rmt_entries(dd, &m, &n);
14256 if (rmt_entries == 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014257 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014258 qpns_per_vl = 1 << m;
14259
Dean Luick372cc85a2016-04-12 11:30:51 -070014260 /* enough room in the map table? */
14261 rmt_entries = 1 << (m + n);
14262 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
Easwar Hariharan859bcad2015-12-10 11:13:38 -050014263 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014264
Dean Luick372cc85a2016-04-12 11:30:51 -070014265	/* add qos entries to the RSM map table */
Dean Luick33a9eb52016-04-12 10:50:22 -070014266 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014267 unsigned tctxt;
14268
14269 for (qpn = 0, tctxt = ctxt;
14270 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14271 unsigned idx, regoff, regidx;
14272
Dean Luick372cc85a2016-04-12 11:30:51 -070014273 /* generate the index the hardware will produce */
14274 idx = rmt->used + ((qpn << n) ^ i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014275 regoff = (idx % 8) * 8;
14276 regidx = idx / 8;
Dean Luick372cc85a2016-04-12 11:30:51 -070014277 /* replace default with context number */
14278 reg = rmt->map[regidx];
Mike Marciniszyn77241052015-07-30 15:17:43 -040014279 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14280 << regoff);
14281 reg |= (u64)(tctxt++) << regoff;
Dean Luick372cc85a2016-04-12 11:30:51 -070014282 rmt->map[regidx] = reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014283 if (tctxt == ctxt + krcvqs[i])
14284 tctxt = ctxt;
14285 }
14286 ctxt += krcvqs[i];
14287 }
Dean Luickb12349a2016-04-12 11:31:33 -070014288
14289 rrd.offset = rmt->used;
14290 rrd.pkt_type = 2;
14291 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14292 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14293 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14294 rrd.index1_width = n;
14295 rrd.index2_off = QPN_SELECT_OFFSET;
14296 rrd.index2_width = m + n;
14297 rrd.mask1 = LRH_BTH_MASK;
14298 rrd.value1 = LRH_BTH_VALUE;
14299 rrd.mask2 = LRH_SC_MASK;
14300 rrd.value2 = LRH_SC_VALUE;
14301
14302 /* add rule 0 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014303 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
Dean Luickb12349a2016-04-12 11:31:33 -070014304
Dean Luick372cc85a2016-04-12 11:30:51 -070014305 /* mark RSM map entries as used */
14306 rmt->used += rmt_entries;
Dean Luick33a9eb52016-04-12 10:50:22 -070014307 /* map everything else to the mcast/err/vl15 context */
14308 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014309 dd->qos_shift = n + 1;
14310 return;
14311bail:
14312 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050014313 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014314}
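/*
 * Index sketch for the map fill in init_qos() above (illustrative values):
 * with n == 2 vl bits and rmt->used == 0, VL i == 1 and qpn == 2 produce
 * map index (2 << 2) ^ 1 == 9.  The entry at that index is then rewritten
 * with the next kernel receive context assigned to that VL.
 */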
14315
Dean Luick8f000f72016-04-12 11:32:06 -070014316static void init_user_fecn_handling(struct hfi1_devdata *dd,
14317 struct rsm_map_table *rmt)
14318{
14319 struct rsm_rule_data rrd;
14320 u64 reg;
14321 int i, idx, regoff, regidx;
14322 u8 offset;
14323
14324 /* there needs to be enough room in the map table */
14325 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14326 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14327 return;
14328 }
14329
14330 /*
14331 * RSM will extract the destination context as an index into the
14332 * map table. The destination contexts are a sequential block
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014333 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
Dean Luick8f000f72016-04-12 11:32:06 -070014334 * Map entries are accessed as offset + extracted value. Adjust
14335 * the added offset so this sequence can be placed anywhere in
14336 * the table - as long as the entries themselves do not wrap.
14337 * There are only enough bits in offset for the table size, so
14338 * start with that to allow for a "negative" offset.
14339 */
14340 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014341 (int)dd->first_dyn_alloc_ctxt);
Dean Luick8f000f72016-04-12 11:32:06 -070014342
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014343 for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
Dean Luick8f000f72016-04-12 11:32:06 -070014344 i < dd->num_rcv_contexts; i++, idx++) {
14345 /* replace with identity mapping */
14346 regoff = (idx % 8) * 8;
14347 regidx = idx / 8;
14348 reg = rmt->map[regidx];
14349 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14350 reg |= (u64)i << regoff;
14351 rmt->map[regidx] = reg;
14352 }
14353
14354 /*
14355 * For RSM intercept of Expected FECN packets:
14356 * o packet type 0 - expected
14357 * o match on F (bit 95), using select/match 1, and
14358 * o match on SH (bit 133), using select/match 2.
14359 *
14360 * Use index 1 to extract the 8-bit receive context from DestQP
14361 * (start at bit 64). Use that as the RSM map table index.
14362 */
14363 rrd.offset = offset;
14364 rrd.pkt_type = 0;
14365 rrd.field1_off = 95;
14366 rrd.field2_off = 133;
14367 rrd.index1_off = 64;
14368 rrd.index1_width = 8;
14369 rrd.index2_off = 0;
14370 rrd.index2_width = 0;
14371 rrd.mask1 = 1;
14372 rrd.value1 = 1;
14373 rrd.mask2 = 1;
14374 rrd.value2 = 1;
14375
14376 /* add rule 1 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014377 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
Dean Luick8f000f72016-04-12 11:32:06 -070014378
14379 rmt->used += dd->num_user_contexts;
14380}
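/*
 * Offset arithmetic sketch for init_user_fecn_handling() (hypothetical
 * values, assuming NUM_MAP_ENTRIES == 256): with rmt->used == 32 and
 * first_dyn_alloc_ctxt == 8, offset becomes (u8)(256 + 32 - 8) == 24.
 * A FECN packet destined for context 10 then indexes map entry
 * 24 + 10 == 34 == 32 + (10 - 8), which is exactly the identity entry
 * written in the loop above.
 */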
14381
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014382/* Initialize RSM for VNIC */
14383void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14384{
14385 u8 i, j;
14386 u8 ctx_id = 0;
14387 u64 reg;
14388 u32 regoff;
14389 struct rsm_rule_data rrd;
14390
14391 if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14392 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14393 dd->vnic.rmt_start);
14394 return;
14395 }
14396
14397 dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14398 dd->vnic.rmt_start,
14399 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14400
14401 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14402 regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14403 reg = read_csr(dd, regoff);
14404 for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14405 /* Update map register with vnic context */
14406 j = (dd->vnic.rmt_start + i) % 8;
14407 reg &= ~(0xffllu << (j * 8));
14408 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14409 /* Wrap up vnic ctx index */
14410 ctx_id %= dd->vnic.num_ctxt;
14411 /* Write back map register */
14412 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14413 dev_dbg(&(dd)->pcidev->dev,
14414 "Vnic rsm map reg[%d] =0x%llx\n",
14415 regoff - RCV_RSM_MAP_TABLE, reg);
14416
14417 write_csr(dd, regoff, reg);
14418 regoff += 8;
14419 if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14420 reg = read_csr(dd, regoff);
14421 }
14422 }
14423
14424 /* Add rule for vnic */
14425 rrd.offset = dd->vnic.rmt_start;
14426 rrd.pkt_type = 4;
14427 /* Match 16B packets */
14428 rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14429 rrd.mask1 = L2_TYPE_MASK;
14430 rrd.value1 = L2_16B_VALUE;
14431 /* Match ETH L4 packets */
14432 rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14433 rrd.mask2 = L4_16B_TYPE_MASK;
14434 rrd.value2 = L4_16B_ETH_VALUE;
14435 /* Calc context from veswid and entropy */
14436 rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14437 rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14438 rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14439 rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14440 add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14441
14442 /* Enable RSM if not already enabled */
14443 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14444}
14445
14446void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14447{
14448 clear_rsm_rule(dd, RSM_INS_VNIC);
14449
14450 /* Disable RSM if used only by vnic */
14451 if (dd->vnic.rmt_start == 0)
14452 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14453}
14454
Mike Marciniszyn77241052015-07-30 15:17:43 -040014455static void init_rxe(struct hfi1_devdata *dd)
14456{
Dean Luick372cc85a2016-04-12 11:30:51 -070014457 struct rsm_map_table *rmt;
14458
Mike Marciniszyn77241052015-07-30 15:17:43 -040014459 /* enable all receive errors */
14460 write_csr(dd, RCV_ERR_MASK, ~0ull);
Dean Luick372cc85a2016-04-12 11:30:51 -070014461
14462 rmt = alloc_rsm_map_table(dd);
14463 /* set up QOS, including the QPN map table */
14464 init_qos(dd, rmt);
Dean Luick8f000f72016-04-12 11:32:06 -070014465 init_user_fecn_handling(dd, rmt);
Dean Luick372cc85a2016-04-12 11:30:51 -070014466 complete_rsm_map_table(dd, rmt);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014467 /* record number of used rsm map entries for vnic */
14468 dd->vnic.rmt_start = rmt->used;
Dean Luick372cc85a2016-04-12 11:30:51 -070014469 kfree(rmt);
14470
Mike Marciniszyn77241052015-07-30 15:17:43 -040014471 /*
14472 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14473 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14474 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14475 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14476 * Max_PayLoad_Size set to its minimum of 128.
14477 *
14478 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14479 * (64 bytes). Max_Payload_Size is possibly modified upward in
14480 * tune_pcie_caps() which is called after this routine.
14481 */
14482}
14483
14484static void init_other(struct hfi1_devdata *dd)
14485{
14486 /* enable all CCE errors */
14487 write_csr(dd, CCE_ERR_MASK, ~0ull);
14488 /* enable *some* Misc errors */
14489 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14490 /* enable all DC errors, except LCB */
14491 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14492 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14493}
14494
14495/*
14496 * Fill out the given AU table using the given CU. A CU is defined in terms
14497 * of AUs. The table is an encoding: given the index, how many AUs does that
14498 * represent?
14499 *
14500 * NOTE: Assumes that the register layout is the same for the
14501 * local and remote tables.
14502 */
14503static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14504 u32 csr0to3, u32 csr4to7)
14505{
14506 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080014507 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14508 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14509 2ull * cu <<
14510 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14511 4ull * cu <<
14512 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014513 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080014514 8ull * cu <<
14515 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14516 16ull * cu <<
14517 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14518 32ull * cu <<
14519 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14520 64ull * cu <<
14521 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014522}
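/*
 * Encoding sketch for the AU tables above: entries 0 and 1 are fixed at
 * 0 and 1 AU, while entries 2..7 scale as 2, 4, 8, 16, 32 and 64 times the
 * given CU.  For example, with cu == 2, entry 3 encodes 8 AUs and entry 7
 * encodes 128 AUs.
 */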
14523
14524static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14525{
14526 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014527 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014528}
14529
14530void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14531{
14532 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014533 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014534}
14535
14536static void init_txe(struct hfi1_devdata *dd)
14537{
14538 int i;
14539
14540 /* enable all PIO, SDMA, general, and Egress errors */
14541 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14542 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14543 write_csr(dd, SEND_ERR_MASK, ~0ull);
14544 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14545
14546 /* enable all per-context and per-SDMA engine errors */
14547 for (i = 0; i < dd->chip_send_contexts; i++)
14548 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14549 for (i = 0; i < dd->chip_sdma_engines; i++)
14550 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14551
14552 /* set the local CU to AU mapping */
14553 assign_local_cm_au_table(dd, dd->vcu);
14554
14555 /*
14556 * Set a reasonable default for the Credit Return Timer.
14557 * Don't set on Simulator - causes it to choke.
14558 */
14559 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14560 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14561}
14562
Michael J. Ruhl17573972017-07-24 07:46:01 -070014563int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14564 u16 jkey)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014565{
Michael J. Ruhl17573972017-07-24 07:46:01 -070014566 u8 hw_ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014567 u64 reg;
14568
Michael J. Ruhl17573972017-07-24 07:46:01 -070014569 if (!rcd || !rcd->sc)
14570 return -EINVAL;
14571
14572 hw_ctxt = rcd->sc->hw_context;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014573 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14574 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14575 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14576 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14577 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14578 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
Michael J. Ruhl17573972017-07-24 07:46:01 -070014579 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014580 /*
14581 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040014582 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014583 if (!is_ax(dd)) {
Michael J. Ruhl17573972017-07-24 07:46:01 -070014584 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014585 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
Michael J. Ruhl17573972017-07-24 07:46:01 -070014586 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014587 }
14588
14589 /* Enable J_KEY check on receive context. */
14590 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14591 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14592 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
Michael J. Ruhl17573972017-07-24 07:46:01 -070014593 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14594
14595 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014596}
14597
Michael J. Ruhl17573972017-07-24 07:46:01 -070014598int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014599{
Michael J. Ruhl17573972017-07-24 07:46:01 -070014600 u8 hw_ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014601 u64 reg;
14602
Michael J. Ruhl17573972017-07-24 07:46:01 -070014603 if (!rcd || !rcd->sc)
14604 return -EINVAL;
14605
14606 hw_ctxt = rcd->sc->hw_context;
14607 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014608 /*
14609 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14610 * This check would not have been enabled for A0 h/w, see
14611 * set_ctxt_jkey().
14612 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014613 if (!is_ax(dd)) {
Michael J. Ruhl17573972017-07-24 07:46:01 -070014614 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014615 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
Michael J. Ruhl17573972017-07-24 07:46:01 -070014616 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014617 }
14618 /* Turn off the J_KEY on the receive side */
Michael J. Ruhl17573972017-07-24 07:46:01 -070014619 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14620
14621 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014622}
14623
Michael J. Ruhl17573972017-07-24 07:46:01 -070014624int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14625 u16 pkey)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014626{
Michael J. Ruhl17573972017-07-24 07:46:01 -070014627 u8 hw_ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014628 u64 reg;
14629
Michael J. Ruhl17573972017-07-24 07:46:01 -070014630 if (!rcd || !rcd->sc)
14631 return -EINVAL;
14632
14633 hw_ctxt = rcd->sc->hw_context;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014634 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14635 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
Michael J. Ruhl17573972017-07-24 07:46:01 -070014636 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14637 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014638 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -070014639 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
Michael J. Ruhl17573972017-07-24 07:46:01 -070014640 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14641
14642 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014643}
14644
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014645int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014646{
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014647 u8 hw_ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014648 u64 reg;
14649
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014650 if (!ctxt || !ctxt->sc)
14651 return -EINVAL;
14652
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014653 hw_ctxt = ctxt->sc->hw_context;
14654 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014655 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014656 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14657 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14658
14659 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014660}
14661
14662/*
14663 * Start the clean up of the chip. Our clean up happens in multiple
14664 * stages and this is just the first.
14665 */
14666void hfi1_start_cleanup(struct hfi1_devdata *dd)
14667{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080014668 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014669 free_cntrs(dd);
14670 free_rcverr(dd);
14671 clean_up_interrupts(dd);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014672 finish_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014673}
14674
14675#define HFI_BASE_GUID(dev) \
14676 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14677
14678/*
Dean Luick78eb1292016-03-05 08:49:45 -080014679 * Information can be shared between the two HFIs on the same ASIC
14680 * in the same OS. This function finds the peer device and sets
14681 * up a shared structure.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014682 */
Dean Luick78eb1292016-03-05 08:49:45 -080014683static int init_asic_data(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014684{
14685 unsigned long flags;
14686 struct hfi1_devdata *tmp, *peer = NULL;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014687 struct hfi1_asic_data *asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014688 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014689
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014690 /* pre-allocate the asic structure in case we are the first device */
14691 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14692 if (!asic_data)
14693 return -ENOMEM;
14694
Mike Marciniszyn77241052015-07-30 15:17:43 -040014695 spin_lock_irqsave(&hfi1_devs_lock, flags);
14696 /* Find our peer device */
14697 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14698 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14699 dd->unit != tmp->unit) {
14700 peer = tmp;
14701 break;
14702 }
14703 }
14704
Dean Luick78eb1292016-03-05 08:49:45 -080014705 if (peer) {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014706 /* use already allocated structure */
Dean Luick78eb1292016-03-05 08:49:45 -080014707 dd->asic_data = peer->asic_data;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014708 kfree(asic_data);
Dean Luick78eb1292016-03-05 08:49:45 -080014709 } else {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014710 dd->asic_data = asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014711 mutex_init(&dd->asic_data->asic_resource_mutex);
14712 }
14713 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014714 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
Dean Luickdba715f2016-07-06 17:28:52 -040014715
14716 /* first one through - set up i2c devices */
14717 if (!peer)
14718 ret = set_up_i2c(dd, dd->asic_data);
14719
Dean Luick78eb1292016-03-05 08:49:45 -080014720 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014721}
14722
Dean Luick5d9157a2015-11-16 21:59:34 -050014723/*
14724 * Set dd->boardname. Use a generic name if a name is not returned from
14725 * EFI variable space.
14726 *
14727 * Return 0 on success, -ENOMEM if space could not be allocated.
14728 */
14729static int obtain_boardname(struct hfi1_devdata *dd)
14730{
14731 /* generic board description */
14732 const char generic[] =
14733 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14734 unsigned long size;
14735 int ret;
14736
14737 ret = read_hfi1_efi_var(dd, "description", &size,
14738 (void **)&dd->boardname);
14739 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014740 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014741 /* use generic description */
14742 dd->boardname = kstrdup(generic, GFP_KERNEL);
14743 if (!dd->boardname)
14744 return -ENOMEM;
14745 }
14746 return 0;
14747}
14748
Kaike Wan24487dd2016-02-26 13:33:23 -080014749/*
14750 * Check the interrupt registers to make sure that they are mapped correctly.
14751 * It is intended to help the user identify any mismapping by the VMM when the
14752 * driver is running in a VM. This function should only be called before the
14753 * interrupts are set up properly.
14754 *
14755 * Return 0 on success, -EINVAL on failure.
14756 */
14757static int check_int_registers(struct hfi1_devdata *dd)
14758{
14759 u64 reg;
14760 u64 all_bits = ~(u64)0;
14761 u64 mask;
14762
14763 /* Clear CceIntMask[0] to avoid raising any interrupts */
14764 mask = read_csr(dd, CCE_INT_MASK);
14765 write_csr(dd, CCE_INT_MASK, 0ull);
14766 reg = read_csr(dd, CCE_INT_MASK);
14767 if (reg)
14768 goto err_exit;
14769
14770 /* Clear all interrupt status bits */
14771 write_csr(dd, CCE_INT_CLEAR, all_bits);
14772 reg = read_csr(dd, CCE_INT_STATUS);
14773 if (reg)
14774 goto err_exit;
14775
14776 /* Set all interrupt status bits */
14777 write_csr(dd, CCE_INT_FORCE, all_bits);
14778 reg = read_csr(dd, CCE_INT_STATUS);
14779 if (reg != all_bits)
14780 goto err_exit;
14781
14782 /* Restore the interrupt mask */
14783 write_csr(dd, CCE_INT_CLEAR, all_bits);
14784 write_csr(dd, CCE_INT_MASK, mask);
14785
14786 return 0;
14787err_exit:
14788 write_csr(dd, CCE_INT_MASK, mask);
14789 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14790 return -EINVAL;
14791}
14792
Mike Marciniszyn77241052015-07-30 15:17:43 -040014793/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014794 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014795 * @pdev: the pci_dev for hfi1_ib device
14796 * @ent: pci_device_id struct for this dev
14797 *
14798 * Also allocates, initializes, and returns the devdata struct for this
14799 * device instance
14800 *
14801 * This is global, and is called directly at init to set up the
14802 * chip-specific function pointers for later use.
14803 */
14804struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14805 const struct pci_device_id *ent)
14806{
14807 struct hfi1_devdata *dd;
14808 struct hfi1_pportdata *ppd;
14809 u64 reg;
14810 int i, ret;
14811 static const char * const inames[] = { /* implementation names */
14812 "RTL silicon",
14813 "RTL VCS simulation",
14814 "RTL FPGA emulation",
14815 "Functional simulator"
14816 };
Kaike Wan24487dd2016-02-26 13:33:23 -080014817 struct pci_dev *parent = pdev->bus->self;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014818
Jubin John17fb4f22016-02-14 20:21:52 -080014819 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14820 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014821 if (IS_ERR(dd))
14822 goto bail;
14823 ppd = dd->pport;
14824 for (i = 0; i < dd->num_pports; i++, ppd++) {
14825 int vl;
14826 /* init common fields */
14827 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14828 /* DC supports 4 link widths */
14829 ppd->link_width_supported =
14830 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14831 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14832 ppd->link_width_downgrade_supported =
14833 ppd->link_width_supported;
14834 /* start out enabling only 4X */
14835 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14836 ppd->link_width_downgrade_enabled =
14837 ppd->link_width_downgrade_supported;
14838 /* link width active is 0 when link is down */
14839 /* link width downgrade active is 0 when link is down */
14840
Jubin Johnd0d236e2016-02-14 20:20:15 -080014841 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14842 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014843 hfi1_early_err(&pdev->dev,
14844 "Invalid num_vls %u, using %u VLs\n",
14845 num_vls, HFI1_MAX_VLS_SUPPORTED);
14846 num_vls = HFI1_MAX_VLS_SUPPORTED;
14847 }
14848 ppd->vls_supported = num_vls;
14849 ppd->vls_operational = ppd->vls_supported;
14850 /* Set the default MTU. */
14851 for (vl = 0; vl < num_vls; vl++)
14852 dd->vld[vl].mtu = hfi1_max_mtu;
14853 dd->vld[15].mtu = MAX_MAD_PACKET;
14854 /*
14855 * Set the initial values to reasonable defaults; they will be set
14856 * for real when the link comes up.
14857 */
14858 ppd->lstate = IB_PORT_DOWN;
14859 ppd->overrun_threshold = 0x4;
14860 ppd->phy_error_threshold = 0xf;
14861 ppd->port_crc_mode_enabled = link_crc_mask;
14862 /* initialize supported LTP CRC mode */
14863 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14864 /* initialize enabled LTP CRC mode */
14865 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
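		/*
		 * Per the shifts above, the supported LTP CRC modes occupy
		 * bits 11:8 and the enabled modes bits 7:4 of
		 * port_ltp_crc_mode.
		 */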
14866 /* start in offline */
14867 ppd->host_link_state = HLS_DN_OFFLINE;
14868 init_vl_arb_caches(ppd);
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070014869 ppd->pstate = PLS_OFFLINE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014870 }
14871
14872 dd->link_default = HLS_DN_POLL;
14873
14874 /*
14875 * Do remaining PCIe setup and save PCIe values in dd.
14876 * Any error printing is already done by the init code.
14877 * On return, we have the chip mapped.
14878 */
Easwar Hariharan26ea2542016-10-17 04:19:58 -070014879 ret = hfi1_pcie_ddinit(dd, pdev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014880 if (ret < 0)
14881 goto bail_free;
14882
Bartlomiej Dudeka618b7e2017-07-24 07:46:30 -070014883 /* Save PCI space registers to rewrite after device reset */
14884 ret = save_pci_variables(dd);
14885 if (ret < 0)
14886 goto bail_cleanup;
14887
Mike Marciniszyn77241052015-07-30 15:17:43 -040014888 /* verify that reads actually work, save revision for reset check */
14889 dd->revision = read_csr(dd, CCE_REVISION);
14890 if (dd->revision == ~(u64)0) {
14891 dd_dev_err(dd, "cannot read chip CSRs\n");
14892 ret = -EINVAL;
14893 goto bail_cleanup;
14894 }
14895 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14896 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14897 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14898 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14899
Jubin John4d114fd2016-02-14 20:21:43 -080014900 /*
Kaike Wan24487dd2016-02-26 13:33:23 -080014901 * Check interrupt registers mapping if the driver has no access to
14902 * the upstream component. In this case, it is likely that the driver
14903 * is running in a VM.
14904 */
14905 if (!parent) {
14906 ret = check_int_registers(dd);
14907 if (ret)
14908 goto bail_cleanup;
14909 }
14910
14911 /*
Jubin John4d114fd2016-02-14 20:21:43 -080014912 * obtain the hardware ID - NOT related to unit, which is a
14913 * software enumeration
14914 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014915 reg = read_csr(dd, CCE_REVISION2);
14916 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14917 & CCE_REVISION2_HFI_ID_MASK;
14918 /* the variable size will remove unwanted bits */
14919 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14920 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14921 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014922 dd->icode < ARRAY_SIZE(inames) ?
14923 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014924
14925 /* speeds the hardware can support */
14926 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14927 /* speeds allowed to run at */
14928 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14929 /* give a reasonable active value, will be set on link up */
14930 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14931
14932 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14933 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14934 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14935 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14936 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14937 /* fix up link widths for emulation _p */
14938 ppd = dd->pport;
14939 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14940 ppd->link_width_supported =
14941 ppd->link_width_enabled =
14942 ppd->link_width_downgrade_supported =
14943 ppd->link_width_downgrade_enabled =
14944 OPA_LINK_WIDTH_1X;
14945 }
14946 /* ensure num_vls isn't larger than the number of sdma engines */
14947 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14948 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014949 num_vls, dd->chip_sdma_engines);
14950 num_vls = dd->chip_sdma_engines;
14951 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014952 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014953 }
14954
14955 /*
14956 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14957 * Limit the max if larger than the field holds. If timeout is
14958 * non-zero, then the calculated field will be at least 1.
14959 *
14960 * Must be after icode is set up - the cclock rate depends
14961 * on knowing the hardware being used.
14962 */
14963 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14964 if (dd->rcv_intr_timeout_csr >
14965 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14966 dd->rcv_intr_timeout_csr =
14967 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14968 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14969 dd->rcv_intr_timeout_csr = 1;
14970
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014971 /* needs to be done before we look for the peer device */
14972 read_guid(dd);
14973
Dean Luick78eb1292016-03-05 08:49:45 -080014974 /* set up shared ASIC data with peer device */
14975 ret = init_asic_data(dd);
14976 if (ret)
14977 goto bail_cleanup;
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014978
Mike Marciniszyn77241052015-07-30 15:17:43 -040014979 /* obtain chip sizes, reset chip CSRs */
Bartlomiej Dudekc53df622017-06-30 13:14:40 -070014980 ret = init_chip(dd);
14981 if (ret)
14982 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014983
14984 /* read in the PCIe link speed information */
14985 ret = pcie_speeds(dd);
14986 if (ret)
14987 goto bail_cleanup;
14988
Dean Luicke83eba22016-09-30 04:41:45 -070014989 /* call before get_platform_config(), after init_chip_resources() */
14990 ret = eprom_init(dd);
14991 if (ret)
14992 goto bail_free_rcverr;
14993
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014994 /* Needs to be called before hfi1_firmware_init */
14995 get_platform_config(dd);
14996
Mike Marciniszyn77241052015-07-30 15:17:43 -040014997 /* read in firmware */
14998 ret = hfi1_firmware_init(dd);
14999 if (ret)
15000 goto bail_cleanup;
15001
15002 /*
15003 * In general, the PCIe Gen3 transition must occur after the
15004 * chip has been idled (so it won't initiate any PCIe transactions
15005 * e.g. an interrupt) and before the driver changes any registers
15006 * (the transition will reset the registers).
15007 *
15008 * In particular, place this call after:
15009 * - init_chip() - the chip will not initiate any PCIe transactions
15010 * - pcie_speeds() - reads the current link speed
15011 * - hfi1_firmware_init() - the needed firmware is ready to be
15012 * downloaded
15013 */
15014 ret = do_pcie_gen3_transition(dd);
15015 if (ret)
15016 goto bail_cleanup;
15017
15018 /* start setting dd values and adjusting CSRs */
15019 init_early_variables(dd);
15020
15021 parse_platform_config(dd);
15022
Dean Luick5d9157a2015-11-16 21:59:34 -050015023 ret = obtain_boardname(dd);
15024 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040015025 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040015026
15027 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050015028 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040015029 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040015030 (u32)dd->majrev,
15031 (u32)dd->minrev,
15032 (dd->revision >> CCE_REVISION_SW_SHIFT)
15033 & CCE_REVISION_SW_MASK);
15034
15035 ret = set_up_context_variables(dd);
15036 if (ret)
15037 goto bail_cleanup;
15038
15039 /* set initial RXE CSRs */
15040 init_rxe(dd);
15041 /* set initial TXE CSRs */
15042 init_txe(dd);
15043 /* set initial non-RXE, non-TXE CSRs */
15044 init_other(dd);
15045 /* set up KDETH QP prefix in both RX and TX CSRs */
15046 init_kdeth_qp(dd);
15047
Dennis Dalessandro41973442016-07-25 07:52:36 -070015048 ret = hfi1_dev_affinity_init(dd);
15049 if (ret)
15050 goto bail_cleanup;
Mitko Haralanov957558c2016-02-03 14:33:40 -080015051
Mike Marciniszyn77241052015-07-30 15:17:43 -040015052 /* send contexts must be set up before receive contexts */
15053 ret = init_send_contexts(dd);
15054 if (ret)
15055 goto bail_cleanup;
15056
15057 ret = hfi1_create_ctxts(dd);
15058 if (ret)
15059 goto bail_cleanup;
15060
15061 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
15062 /*
15063 * rcd[0] is guaranteed to be valid by this point. Also, all
15064 * contexts are using the same value, as per the module parameter.
15065 */
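	/*
	 * rcvhdrqentsize and rhf_offset are in dwords; sizeof(u64) /
	 * sizeof(u32) == 2, i.e. the RHF presumably occupies the last
	 * two dwords of each header queue entry.
	 */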
15066 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
15067
15068 ret = init_pervl_scs(dd);
15069 if (ret)
15070 goto bail_cleanup;
15071
15072 /* sdma init */
15073 for (i = 0; i < dd->num_pports; ++i) {
15074 ret = sdma_init(dd, i);
15075 if (ret)
15076 goto bail_cleanup;
15077 }
15078
15079 /* use contexts created by hfi1_create_ctxts */
15080 ret = set_up_interrupts(dd);
15081 if (ret)
15082 goto bail_cleanup;
15083
15084 /* set up LCB access - must be after set_up_interrupts() */
15085 init_lcb_access(dd);
15086
Ira Weinyfc0b76c2016-07-27 21:09:40 -040015087 /*
15088 * Serial number is created from the base guid:
15089 * [27:24] = base guid [38:35]
15090 * [23: 0] = base guid [23: 0]
15091 */
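	/*
	 * The shift below by 11 moves base guid bits 38:35 down to bits
	 * 27:24 (35 - 11 = 24), where the 0xF000000 mask keeps them; the
	 * 0xFFFFFF mask keeps bits 23:0 unchanged.
	 */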
Mike Marciniszyn77241052015-07-30 15:17:43 -040015092 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
Ira Weinyfc0b76c2016-07-27 21:09:40 -040015093 (dd->base_guid & 0xFFFFFF) |
15094 ((dd->base_guid >> 11) & 0xF000000));
Mike Marciniszyn77241052015-07-30 15:17:43 -040015095
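	/* the OUI is the top three bytes of the base guid */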
15096 dd->oui1 = dd->base_guid >> 56 & 0xFF;
15097 dd->oui2 = dd->base_guid >> 48 & 0xFF;
15098 dd->oui3 = dd->base_guid >> 40 & 0xFF;
15099
15100 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15101 if (ret)
15102 goto bail_clear_intr;
Mike Marciniszyn77241052015-07-30 15:17:43 -040015103
15104 thermal_init(dd);
15105
15106 ret = init_cntrs(dd);
15107 if (ret)
15108 goto bail_clear_intr;
15109
15110 ret = init_rcverr(dd);
15111 if (ret)
15112 goto bail_free_cntrs;
15113
Tadeusz Strukacd7c8f2016-10-25 08:57:55 -070015114 init_completion(&dd->user_comp);
15115
15116 /* The user refcount starts with one to indicate an active device */
15117 atomic_set(&dd->user_refcount, 1);
15118
Mike Marciniszyn77241052015-07-30 15:17:43 -040015119 goto bail;
15120
15121bail_free_rcverr:
15122 free_rcverr(dd);
15123bail_free_cntrs:
15124 free_cntrs(dd);
15125bail_clear_intr:
15126 clean_up_interrupts(dd);
15127bail_cleanup:
15128 hfi1_pcie_ddcleanup(dd);
15129bail_free:
15130 hfi1_free_devdata(dd);
15131 dd = ERR_PTR(ret);
15132bail:
15133 return dd;
15134}
15135
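/*
 * Compute the extra egress wait, in cycles, needed to slow a dw_len-dword
 * packet from the port's current egress rate down to desired_egress_rate.
 * Returns 0 when no throttling is needed or the desired rate is unknown (-1).
 */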
15136static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15137 u32 dw_len)
15138{
15139 u32 delta_cycles;
15140 u32 current_egress_rate = ppd->current_egress_rate;
15141 /* rates here are in units of 10^6 bits/sec */
15142
15143 if (desired_egress_rate == -1)
15144 return 0; /* shouldn't happen */
15145
15146 if (desired_egress_rate >= current_egress_rate)
15147 return 0; /* we can't go any faster, only slower */
15148
15149 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15150 egress_cycles(dw_len * 4, current_egress_rate);
15151
15152 return (u16)delta_cycles;
15153}
15154
Mike Marciniszyn77241052015-07-30 15:17:43 -040015155/**
15156 * create_pbc - build a pbc for transmission
15157 * @flags: special case flags OR-ed into the built pbc
15158 * @srate_mbs: static rate in Mb/s
15159 * @vl: virtual lane
15160 * @dw_len: dword length (header words + data words + pbc words)
15161 *
15162 * Create a PBC with the given flags, rate, VL, and length.
15163 *
15164 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15165 * for verbs, which does not use this PSM feature. The lone other caller
15166 * is for the diagnostic interface which calls this if the user does not
15167 * supply their own PBC.
15168 */
15169u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15170 u32 dw_len)
15171{
15172 u64 pbc, delay = 0;
15173
15174 if (unlikely(srate_mbs))
15175 delay = delay_cycles(ppd, srate_mbs, dw_len);
15176
15177 pbc = flags
15178 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15179 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15180 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15181 | (dw_len & PBC_LENGTH_DWS_MASK)
15182 << PBC_LENGTH_DWS_SHIFT;
15183
15184 return pbc;
15185}
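
/*
 * Illustrative call (a sketch, not taken from the driver): build a PBC for
 * a 16-dword packet on VL 0 with no special flags and no static rate
 * limiting (srate_mbs == 0 skips the delay calculation):
 *
 *	pbc = create_pbc(ppd, 0, 0, 0, 16);
 */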
15186
15187#define SBUS_THERMAL 0x4f
15188#define SBUS_THERM_MONITOR_MODE 0x1
15189
15190#define THERM_FAILURE(dev, ret, reason) \
15191 dd_dev_err((dd), \
15192 "Thermal sensor initialization failed: %s (%d)\n", \
15193 (reason), (ret))
15194
15195/*
Jakub Pawlakcde10af2016-05-12 10:23:35 -070015196 * Initialize the thermal sensor.
Mike Marciniszyn77241052015-07-30 15:17:43 -040015197 *
15198 * After initialization, enable polling of the thermal sensor through
15199 * the SBus interface. For this to work, the SBus Master firmware
15200 * must be loaded, because the HW polling logic uses SBus
15201 * interrupts, which are not supported by the default firmware.
15202 * Otherwise, no data will be returned through the
15203 * ASIC_STS_THERM CSR.
15204 */
15205static int thermal_init(struct hfi1_devdata *dd)
15206{
15207 int ret = 0;
15208
15209 if (dd->icode != ICODE_RTL_SILICON ||
Dean Luicka4536982016-03-05 08:50:11 -080015210 check_chip_resource(dd, CR_THERM_INIT, NULL))
Mike Marciniszyn77241052015-07-30 15:17:43 -040015211 return ret;
15212
Dean Luick576531f2016-03-05 08:50:01 -080015213 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15214 if (ret) {
15215 THERM_FAILURE(dd, ret, "Acquire SBus");
15216 return ret;
15217 }
15218
Mike Marciniszyn77241052015-07-30 15:17:43 -040015219 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050015220 /* Disable polling of thermal readings */
15221 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15222 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015223 /* Thermal Sensor Initialization */
15224 /* Step 1: Reset the Thermal SBus Receiver */
15225 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15226 RESET_SBUS_RECEIVER, 0);
15227 if (ret) {
15228 THERM_FAILURE(dd, ret, "Bus Reset");
15229 goto done;
15230 }
15231 /* Step 2: Set Reset bit in Thermal block */
15232 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15233 WRITE_SBUS_RECEIVER, 0x1);
15234 if (ret) {
15235 THERM_FAILURE(dd, ret, "Therm Block Reset");
15236 goto done;
15237 }
15238 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
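	/* 0x32 is 50 decimal: 100 MHz / 50 = 2 MHz */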
15239 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15240 WRITE_SBUS_RECEIVER, 0x32);
15241 if (ret) {
15242 THERM_FAILURE(dd, ret, "Write Clock Div");
15243 goto done;
15244 }
15245 /* Step 4: Select temperature mode */
15246 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15247 WRITE_SBUS_RECEIVER,
15248 SBUS_THERM_MONITOR_MODE);
15249 if (ret) {
15250 THERM_FAILURE(dd, ret, "Write Mode Sel");
15251 goto done;
15252 }
15253 /* Step 5: De-assert block reset and start conversion */
15254 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15255 WRITE_SBUS_RECEIVER, 0x2);
15256 if (ret) {
15257 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15258 goto done;
15259 }
15260 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
15261 msleep(22);
15262
15263 /* Enable polling of thermal readings */
15264 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
Dean Luicka4536982016-03-05 08:50:11 -080015265
15266 /* Set initialized flag */
15267 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15268 if (ret)
15269 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15270
Mike Marciniszyn77241052015-07-30 15:17:43 -040015271done:
Dean Luick576531f2016-03-05 08:50:01 -080015272 release_chip_resource(dd, CR_SBUS);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015273 return ret;
15274}
15275
15276static void handle_temp_err(struct hfi1_devdata *dd)
15277{
15278 struct hfi1_pportdata *ppd = &dd->pport[0];
15279 /*
15280 * Thermal Critical Interrupt
15281 * Put the device into forced freeze mode, take link down to
15282 * offline, and put DC into reset.
15283 */
15284 dd_dev_emerg(dd,
15285 "Critical temperature reached! Forcing device into freeze mode!\n");
15286 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080015287 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015288 /*
15289 * Shut DC down as much and as quickly as possible.
15290 *
15291 * Step 1: Take the link down to OFFLINE. This will cause the
15292 * 8051 to put the Serdes in reset. However, we don't want to
15293 * go through the entire link state machine since we want to
15294 * shutdown ASAP. Furthermore, this is not a graceful shutdown
15295 * but rather an attempt to save the chip.
15296 * Code below is almost the same as quiet_serdes() but avoids
15297 * all the extra work and the sleeps.
15298 */
15299 ppd->driver_link_ready = 0;
15300 ppd->link_enabled = 0;
Harish Chegondibf640092016-03-05 08:49:29 -080015301 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15302 PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015303 /*
15304 * Step 2: Shutdown LCB and 8051
15305 * After shutdown, do not restore DC_CFG_RESET value.
15306 */
15307 dc_shutdown(dd);
15308}