1/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51/*
52 * This file contains all of the code that is specific to the HFI chip
53 */
54
55#include <linux/pci.h>
56#include <linux/delay.h>
57#include <linux/interrupt.h>
58#include <linux/module.h>
59
60#include "hfi.h"
61#include "trace.h"
62#include "mad.h"
63#include "pio.h"
64#include "sdma.h"
65#include "eprom.h"
66#include "efivar.h"
67#include "platform.h"
68#include "aspm.h"
69
70#define NUM_IB_PORTS 1
71
72uint kdeth_qp;
73module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
74MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
75
76uint num_vls = HFI1_MAX_VLS_SUPPORTED;
77module_param(num_vls, uint, S_IRUGO);
78MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
79
80/*
81 * Default time to aggregate two 10K packets from the idle state
82 * (timer not running). The timer starts at the end of the first packet,
83 * so only the time for one 10K packet and header plus a bit extra is needed.
84 * 10 * 1024 + 64 header bytes = 10304 bytes
85 * 10304 bytes / 12.5 GB/s = 824.32 ns
86 */
87uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
88module_param(rcv_intr_timeout, uint, S_IRUGO);
89MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
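/*
 * Worked out: 10304 bytes / 12.5 GB/s = 824.32 ns, truncated to 824 ns;
 * the extra 16 ns allowance for interrupt coalescing gives the 840 ns
 * default used above.
 */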
90
91uint rcv_intr_count = 16; /* same as qib */
92module_param(rcv_intr_count, uint, S_IRUGO);
93MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
94
95ushort link_crc_mask = SUPPORTED_CRCS;
96module_param(link_crc_mask, ushort, S_IRUGO);
97MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
98
99uint loopback;
100module_param_named(loopback, loopback, uint, S_IRUGO);
101MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
102
103/* Other driver tunables */
104uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
105static ushort crc_14b_sideband = 1;
106static uint use_flr = 1;
107uint quick_linkup; /* skip LNI */
108
109struct flag_table {
110 u64 flag; /* the flag */
111 char *str; /* description string */
112 u16 extra; /* extra information */
113 u16 unused0;
114 u32 unused1;
115};
116
117/* str must be a string constant */
118#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
119#define FLAG_ENTRY0(str, flag) {flag, str, 0}
120
121/* Send Error Consequences */
122#define SEC_WRITE_DROPPED 0x1
123#define SEC_PACKET_DROPPED 0x2
124#define SEC_SC_HALTED 0x4 /* per-context only */
125#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
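/*
 * For example, the PIO error table below uses
 *	FLAG_ENTRY("PioWriteBadCtxt", SEC_WRITE_DROPPED, <err mask>)
 * which, given the struct flag_table layout above, expands to the
 * initializer { <err mask>, "PioWriteBadCtxt", SEC_WRITE_DROPPED };
 * FLAG_ENTRY0() is the same but leaves the "extra" consequence field 0.
 * The SEC_* bits are what the PIO and send context error tables carry in
 * that "extra" field to describe the consequence of each error.
 */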
126
127#define MIN_KERNEL_KCTXTS 2
128#define FIRST_KERNEL_KCTXT 1
129#define NUM_MAP_REGS 32
130
131/* Bit offset into the GUID which carries HFI id information */
132#define GUID_HFI_INDEX_SHIFT 39
133
134/* extract the emulation revision */
135#define emulator_rev(dd) ((dd)->irev >> 8)
136/* parallel and serial emulation versions are 3 and 4 respectively */
137#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
138#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
139
140/* RSM fields */
141
142/* packet type */
143#define IB_PACKET_TYPE 2ull
144#define QW_SHIFT 6ull
145/* QPN[7..1] */
146#define QPN_WIDTH 7ull
147
148/* LRH.BTH: QW 0, OFFSET 48 - for match */
149#define LRH_BTH_QW 0ull
150#define LRH_BTH_BIT_OFFSET 48ull
151#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
152#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
153#define LRH_BTH_SELECT
154#define LRH_BTH_MASK 3ull
155#define LRH_BTH_VALUE 2ull
156
157/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
158#define LRH_SC_QW 0ull
159#define LRH_SC_BIT_OFFSET 56ull
160#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
161#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
162#define LRH_SC_MASK 128ull
163#define LRH_SC_VALUE 0ull
164
165/* SC[n..0] QW 0, OFFSET 60 - for select */
166#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
167
168/* QPN[m+n:1] QW 1, OFFSET 1 */
169#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
170
171/* defines to build power on SC2VL table */
172#define SC2VL_VAL( \
173 num, \
174 sc0, sc0val, \
175 sc1, sc1val, \
176 sc2, sc2val, \
177 sc3, sc3val, \
178 sc4, sc4val, \
179 sc5, sc5val, \
180 sc6, sc6val, \
181 sc7, sc7val) \
182( \
183 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
184 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
185 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
186 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
187 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
188 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
189 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
190 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
191)
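/*
 * Usage sketch (illustrative values; assumes the SendSC2VLt0 CSR offset is
 * named SEND_SC2VLT0, matching the shift macro names used above):
 *
 *	write_csr(dd, SEND_SC2VLT0,
 *		  SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3,
 *			    4, 4, 5, 5, 6, 6, 7, 7));
 *
 * builds the 64-bit power-on value mapping SC0..SC7 one-to-one onto
 * VL0..VL7.
 */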
192
193#define DC_SC_VL_VAL( \
194 range, \
195 e0, e0val, \
196 e1, e1val, \
197 e2, e2val, \
198 e3, e3val, \
199 e4, e4val, \
200 e5, e5val, \
201 e6, e6val, \
202 e7, e7val, \
203 e8, e8val, \
204 e9, e9val, \
205 e10, e10val, \
206 e11, e11val, \
207 e12, e12val, \
208 e13, e13val, \
209 e14, e14val, \
210 e15, e15val) \
211( \
212 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
213 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
214 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
215 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
216 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
217 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
218 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
219 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
220 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
221 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
222 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
223 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
224 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
225 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
226 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
227 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
228)
229
230/* all CceStatus sub-block freeze bits */
231#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
232 | CCE_STATUS_RXE_FROZE_SMASK \
233 | CCE_STATUS_TXE_FROZE_SMASK \
234 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
235/* all CceStatus sub-block TXE pause bits */
236#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
237 | CCE_STATUS_TXE_PAUSED_SMASK \
238 | CCE_STATUS_SDMA_PAUSED_SMASK)
239/* all CceStatus sub-block RXE pause bits */
240#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
241
242/*
243 * CCE Error flags.
244 */
245static struct flag_table cce_err_status_flags[] = {
246/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
247 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
248/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
249 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
250/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
251 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
252/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
253 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
254/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
255 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
256/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
257 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
258/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
259 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
260/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
261 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
262/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
263 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
264/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
265 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
266/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
267 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
268/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
269 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
270/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
271 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
272/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
273 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
274/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
275 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
276/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
277 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
278/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
279 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
280/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
281 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
282/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
283 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
284/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
285 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
286/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
287 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
288/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
289 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
290/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
291 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
292/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
293 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
294/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
295 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
296/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
297 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
298/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
299 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
300/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
301 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
302/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
303 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
304/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
305 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
306/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
307 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
308/*31*/ FLAG_ENTRY0("LATriggered",
309 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
310/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
311 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
312/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
313 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
314/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
315 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
316/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
317 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
318/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
319 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
320/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
321 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
322/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
323 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
324/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
325 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
326/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
327 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
328/*41-63 reserved*/
329};
330
331/*
332 * Misc Error flags
333 */
334#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
335static struct flag_table misc_err_status_flags[] = {
336/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
337/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
338/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
339/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
340/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
341/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
342/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
343/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
344/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
345/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
346/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
347/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
348/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
349};
350
351/*
352 * TXE PIO Error flags and consequences
353 */
354static struct flag_table pio_err_status_flags[] = {
355/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
356 SEC_WRITE_DROPPED,
357 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
358/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
359 SEC_SPC_FREEZE,
360 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
361/* 2*/ FLAG_ENTRY("PioCsrParity",
362 SEC_SPC_FREEZE,
363 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
364/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
365 SEC_SPC_FREEZE,
366 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
367/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
368 SEC_SPC_FREEZE,
369 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
370/* 5*/ FLAG_ENTRY("PioPccFifoParity",
371 SEC_SPC_FREEZE,
372 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
373/* 6*/ FLAG_ENTRY("PioPecFifoParity",
374 SEC_SPC_FREEZE,
375 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
376/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
377 SEC_SPC_FREEZE,
378 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
379/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
380 SEC_SPC_FREEZE,
381 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
382/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
383 SEC_SPC_FREEZE,
384 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
385/*10*/ FLAG_ENTRY("PioSmPktResetParity",
386 SEC_SPC_FREEZE,
387 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
388/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
389 SEC_SPC_FREEZE,
390 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
391/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
392 SEC_SPC_FREEZE,
393 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
394/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
395 0,
396 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
397/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
398 0,
399 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
400/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
401 SEC_SPC_FREEZE,
402 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
403/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
404 SEC_SPC_FREEZE,
405 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
406/*17*/ FLAG_ENTRY("PioInitSmIn",
407 0,
408 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
409/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
410 SEC_SPC_FREEZE,
411 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
412/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
413 SEC_SPC_FREEZE,
414 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
415/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
416 0,
417 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
418/*21*/ FLAG_ENTRY("PioWriteDataParity",
419 SEC_SPC_FREEZE,
420 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
421/*22*/ FLAG_ENTRY("PioStateMachine",
422 SEC_SPC_FREEZE,
423 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
424/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
425 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
426 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
427/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
428 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
429 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
430/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
431 SEC_SPC_FREEZE,
432 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
433/*26*/ FLAG_ENTRY("PioVlfSopParity",
434 SEC_SPC_FREEZE,
435 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
436/*27*/ FLAG_ENTRY("PioVlFifoParity",
437 SEC_SPC_FREEZE,
438 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
439/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
440 SEC_SPC_FREEZE,
441 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
442/*29*/ FLAG_ENTRY("PioPpmcSopLen",
443 SEC_SPC_FREEZE,
444 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
445/*30-31 reserved*/
446/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
447 SEC_SPC_FREEZE,
448 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
449/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
450 SEC_SPC_FREEZE,
451 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
452/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
453 SEC_SPC_FREEZE,
454 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
455/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
456 SEC_SPC_FREEZE,
457 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
458/*36-63 reserved*/
459};
460
461/* TXE PIO errors that cause an SPC freeze */
462#define ALL_PIO_FREEZE_ERR \
463 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
491 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
492
493/*
494 * TXE SDMA Error flags
495 */
496static struct flag_table sdma_err_status_flags[] = {
497/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
498 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
499/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
500 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
501/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
502 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
503/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
504 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
505/*04-63 reserved*/
506};
507
508/* TXE SDMA errors that cause an SPC freeze */
509#define ALL_SDMA_FREEZE_ERR \
510 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
511 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
512 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
513
514/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
515#define PORT_DISCARD_EGRESS_ERRS \
516 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
517 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
518 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
519
520/*
521 * TXE Egress Error flags
522 */
523#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
524static struct flag_table egress_err_status_flags[] = {
525/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
526/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
527/* 2 reserved */
528/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
529 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
530/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
531/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
532/* 6 reserved */
533/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
534 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
535/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
536 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
537/* 9-10 reserved */
538/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
539 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
540/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
541/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
542/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
543/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
544/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
545 SEES(TX_SDMA0_DISALLOWED_PACKET)),
546/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
547 SEES(TX_SDMA1_DISALLOWED_PACKET)),
548/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
549 SEES(TX_SDMA2_DISALLOWED_PACKET)),
550/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
551 SEES(TX_SDMA3_DISALLOWED_PACKET)),
552/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
553 SEES(TX_SDMA4_DISALLOWED_PACKET)),
554/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
555 SEES(TX_SDMA5_DISALLOWED_PACKET)),
556/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
557 SEES(TX_SDMA6_DISALLOWED_PACKET)),
558/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
559 SEES(TX_SDMA7_DISALLOWED_PACKET)),
560/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
561 SEES(TX_SDMA8_DISALLOWED_PACKET)),
562/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
563 SEES(TX_SDMA9_DISALLOWED_PACKET)),
564/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
565 SEES(TX_SDMA10_DISALLOWED_PACKET)),
566/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
567 SEES(TX_SDMA11_DISALLOWED_PACKET)),
568/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
569 SEES(TX_SDMA12_DISALLOWED_PACKET)),
570/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
571 SEES(TX_SDMA13_DISALLOWED_PACKET)),
572/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
573 SEES(TX_SDMA14_DISALLOWED_PACKET)),
574/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
575 SEES(TX_SDMA15_DISALLOWED_PACKET)),
576/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
577 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
578/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
579 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
580/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
581 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
582/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
583 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
584/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
585 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
586/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
587 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
588/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
589 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
590/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
591 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
592/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
593 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
594/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
595/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
596/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
597/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
598/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
599/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
600/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
601/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
602/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
603/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
604/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
605/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
606/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
607/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
608/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
609/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
610/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
611/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
612/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
613/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
614/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
615/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
616 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
617/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
618 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
619};
620
621/*
622 * TXE Egress Error Info flags
623 */
624#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
625static struct flag_table egress_err_info_flags[] = {
626/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
627/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
628/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
629/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
630/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
631/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
632/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
633/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
634/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
635/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
636/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
637/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
638/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
639/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
640/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
641/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
642/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
643/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
644/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
645/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
646/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
647/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
648};
649
650/* TXE Egress errors that cause an SPC freeze */
651#define ALL_TXE_EGRESS_FREEZE_ERR \
652 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
653 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
654 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
655 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
656 | SEES(TX_LAUNCH_CSR_PARITY) \
657 | SEES(TX_SBRD_CTL_CSR_PARITY) \
658 | SEES(TX_CONFIG_PARITY) \
659 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
660 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
661 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
662 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
663 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
664 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
665 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
666 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
667 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
668 | SEES(TX_CREDIT_RETURN_PARITY))
669
670/*
671 * TXE Send error flags
672 */
673#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
674static struct flag_table send_err_status_flags[] = {
675/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
676/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
677/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
678};
679
680/*
681 * TXE Send Context Error flags and consequences
682 */
683static struct flag_table sc_err_status_flags[] = {
684/* 0*/ FLAG_ENTRY("InconsistentSop",
685 SEC_PACKET_DROPPED | SEC_SC_HALTED,
686 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
687/* 1*/ FLAG_ENTRY("DisallowedPacket",
688 SEC_PACKET_DROPPED | SEC_SC_HALTED,
689 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
690/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
691 SEC_WRITE_DROPPED | SEC_SC_HALTED,
692 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
693/* 3*/ FLAG_ENTRY("WriteOverflow",
694 SEC_WRITE_DROPPED | SEC_SC_HALTED,
695 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
696/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
697 SEC_WRITE_DROPPED | SEC_SC_HALTED,
698 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
699/* 5-63 reserved*/
700};
701
702/*
703 * RXE Receive Error flags
704 */
705#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
706static struct flag_table rxe_err_status_flags[] = {
707/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
708/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
709/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
710/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
711/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
712/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
713/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
714/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
715/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
716/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
717/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
718/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
719/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
720/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
721/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
722/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
723/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
724 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
725/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
726/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
727/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
728 RXES(RBUF_BLOCK_LIST_READ_UNC)),
729/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
730 RXES(RBUF_BLOCK_LIST_READ_COR)),
731/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
732 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
733/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
734 RXES(RBUF_CSR_QENT_CNT_PARITY)),
735/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
736 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
737/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
738 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
739/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
740/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
741/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
742 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
743/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
744/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
745/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
746/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
747/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
748/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
749/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
750/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
751 RXES(RBUF_FL_INITDONE_PARITY)),
752/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
753 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
754/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
755/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
756/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
757/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
758 RXES(LOOKUP_DES_PART1_UNC_COR)),
759/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
760 RXES(LOOKUP_DES_PART2_PARITY)),
761/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
762/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
763/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
764/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
765/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
766/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
767/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
768/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
769/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
770/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
771/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
772/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
773/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
774/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
775/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
776/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
777/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
778/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
779/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
780/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
781/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
782/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
783};
784
785/* RXE errors that will trigger an SPC freeze */
786#define ALL_RXE_FREEZE_ERR \
787 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
830 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
831
832#define RXE_FREEZE_ABORT_MASK \
833 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
834 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
835 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
836
837/*
838 * DCC Error Flags
839 */
840#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
841static struct flag_table dcc_err_flags[] = {
842 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
843 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
844 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
845 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
846 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
847 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
848 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
849 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
850 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
851 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
852 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
853 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
854 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
855 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
856 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
857 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
858 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
859 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
860 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
861 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
862 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
863 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
864 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
865 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
866 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
867 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
868 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
869 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
870 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
871 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
872 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
873 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
874 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
875 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
876 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
877 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
878 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
879 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
880 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
881 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
882 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
883 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
884 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
885 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
886 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
887 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
888};
889
890/*
891 * LCB error flags
892 */
893#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
894static struct flag_table lcb_err_flags[] = {
895/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
896/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
897/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
898/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
899 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
900/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
901/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
902/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
903/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
904/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
905/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
906/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
907/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
908/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
909/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
910 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
911/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
912/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
913/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
914/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
915/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
916/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
917 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
918/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
919/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
920/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
921/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
922/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
923/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
924/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
925 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
926/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
927/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
928 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
929/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
930 LCBE(REDUNDANT_FLIT_PARITY_ERR))
931};
932
933/*
934 * DC8051 Error Flags
935 */
936#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
937static struct flag_table dc8051_err_flags[] = {
938 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
939 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
940 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
941 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
942 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
943 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
944 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
945 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
946 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
947 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
948 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
949};
950
951/*
952 * DC8051 Information Error flags
953 *
954 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
955 */
956static struct flag_table dc8051_info_err_flags[] = {
957 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
958 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
959 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
960 FLAG_ENTRY0("Serdes internal loopback failure",
961 FAILED_SERDES_INTERNAL_LOOPBACK),
962 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
963 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
964 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
965 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
966 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
967 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
968 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
969 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
970};
971
972/*
973 * DC8051 Information Host Information flags
974 *
975 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
976 */
977static struct flag_table dc8051_info_host_msg_flags[] = {
978 FLAG_ENTRY0("Host request done", 0x0001),
979 FLAG_ENTRY0("BC SMA message", 0x0002),
980 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
981 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
982 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
983 FLAG_ENTRY0("External device config request", 0x0020),
984 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
985 FLAG_ENTRY0("LinkUp achieved", 0x0080),
986 FLAG_ENTRY0("Link going down", 0x0100),
987};
988
989
990static u32 encoded_size(u32 size);
991static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
992static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
993static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
994 u8 *continuous);
995static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
996 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
997static void read_vc_remote_link_width(struct hfi1_devdata *dd,
998 u8 *remote_tx_rate, u16 *link_widths);
999static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
1000 u8 *flag_bits, u16 *link_widths);
1001static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1002 u8 *device_rev);
1003static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1004static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1005static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1006 u8 *tx_polarity_inversion,
1007 u8 *rx_polarity_inversion, u8 *max_rate);
1008static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1009 unsigned int context, u64 err_status);
1010static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1011static void handle_dcc_err(struct hfi1_devdata *dd,
1012 unsigned int context, u64 err_status);
1013static void handle_lcb_err(struct hfi1_devdata *dd,
1014 unsigned int context, u64 err_status);
1015static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1021static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1023static void set_partition_keys(struct hfi1_pportdata *);
1024static const char *link_state_name(u32 state);
1025static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1026 u32 state);
1027static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1028 u64 *out_data);
1029static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1030static int thermal_init(struct hfi1_devdata *dd);
1031
1032static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1033 int msecs);
1034static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1035static void handle_temp_err(struct hfi1_devdata *);
1036static void dc_shutdown(struct hfi1_devdata *);
1037static void dc_start(struct hfi1_devdata *);
1038
1039/*
1040 * Error interrupt table entry. This is used as input to the interrupt
1041 * "clear down" routine used for all second tier error interrupt register.
1042 * Second tier interrupt registers have a single bit representing them
1043 * in the top-level CceIntStatus.
1044 */
1045struct err_reg_info {
1046 u32 status; /* status CSR offset */
1047 u32 clear; /* clear CSR offset */
1048 u32 mask; /* mask CSR offset */
1049 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1050 const char *desc;
1051};
1052
1053#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1054#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1055#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1056
1057/*
1058 * Helpers for building HFI and DC error interrupt table entries. Different
1059 * helpers are needed because of inconsistent register names.
1060 */
1061#define EE(reg, handler, desc) \
1062 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1063 handler, desc }
1064#define DC_EE1(reg, handler, desc) \
1065 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1066#define DC_EE2(reg, handler, desc) \
1067 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
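/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" };
 * DC_EE1() pastes the _FLG/_FLG_CLR/_FLG_EN register names and DC_EE2()
 * pastes _FLG/_CLR/_EN.
 */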
1068
1069/*
1070 * Table of the "misc" grouping of error interrupts. Each entry refers to
1071 * another register containing more information.
1072 */
1073static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1074/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1075/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1076/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1077/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1078/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1079/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1080/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1081/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1082 /* the rest are reserved */
1083};
1084
1085/*
1086 * Index into the Various section of the interrupt sources
1087 * corresponding to the Critical Temperature interrupt.
1088 */
1089#define TCRIT_INT_SOURCE 4
1090
1091/*
1092 * SDMA error interrupt entry - refers to another register containing more
1093 * information.
1094 */
1095static const struct err_reg_info sdma_eng_err =
1096 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1097
1098static const struct err_reg_info various_err[NUM_VARIOUS] = {
1099/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1100/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1101/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1102/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1103/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1104 /* rest are reserved */
1105};
1106
1107/*
1108 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1109 * register can not be derived from the MTU value because 10K is not
1110 * a power of 2. Therefore, we need a constant. Everything else can
1111 * be calculated.
1112 */
1113#define DCC_CFG_PORT_MTU_CAP_10240 7
1114
1115/*
1116 * Table of the DC grouping of error interrupts. Each entry refers to
1117 * another register containing more information.
1118 */
1119static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1120/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1121/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1122/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1123/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1124 /* the rest are reserved */
1125};
1126
1127struct cntr_entry {
1128 /*
1129 * counter name
1130 */
1131 char *name;
1132
1133 /*
1134 * csr to read for name (if applicable)
1135 */
1136 u64 csr;
1137
1138 /*
1139 * offset into dd or ppd to store the counter's value
1140 */
1141 int offset;
1142
1143 /*
1144 * flags
1145 */
1146 u8 flags;
1147
1148 /*
1149 * accessor for stat element, context either dd or ppd
1150 */
1151 u64 (*rw_cntr)(const struct cntr_entry *,
1152 void *context,
1153 int vl,
1154 int mode,
1155 u64 data);
1156};
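/*
 * Call sketch, based on the accessors defined below: the counter code reads
 * with rw_cntr(entry, context, vl, CNTR_MODE_R, 0) and writes with
 * rw_cntr(entry, context, vl, CNTR_MODE_W, value), where context is a
 * dd or ppd pointer. Counters that are not per-VL are passed
 * vl == CNTR_INVALID_VL; the accessors return 0 for any mismatched vl.
 */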
1157
1158#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1159#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1160
1161#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1162{ \
1163 name, \
1164 csr, \
1165 offset, \
1166 flags, \
1167 accessor \
1168}
1169
1170/* 32bit RXE */
1171#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1172CNTR_ELEM(#name, \
1173 (counter * 8 + RCV_COUNTER_ARRAY32), \
1174 0, flags | CNTR_32BIT, \
1175 port_access_u32_csr)
1176
1177#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1178CNTR_ELEM(#name, \
1179 (counter * 8 + RCV_COUNTER_ARRAY32), \
1180 0, flags | CNTR_32BIT, \
1181 dev_access_u32_csr)
1182
1183/* 64bit RXE */
1184#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1185CNTR_ELEM(#name, \
1186 (counter * 8 + RCV_COUNTER_ARRAY64), \
1187 0, flags, \
1188 port_access_u64_csr)
1189
1190#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1191CNTR_ELEM(#name, \
1192 (counter * 8 + RCV_COUNTER_ARRAY64), \
1193 0, flags, \
1194 dev_access_u64_csr)
1195
1196#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1197#define OVR_ELM(ctx) \
1198CNTR_ELEM("RcvHdrOvr" #ctx, \
1199 (RCV_HDR_OVFL_CNT + ctx*0x100), \
1200 0, CNTR_NORMAL, port_access_u64_csr)
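/*
 * For example, OVR_ELM(0) expands to
 * CNTR_ELEM("RcvHdrOvr0", RCV_HDR_OVFL_CNT + 0x000, 0, CNTR_NORMAL,
 *	      port_access_u64_csr)
 * -- one header-overflow counter per receive context, with each context's
 * copy of the CSR located 0x100 bytes after the previous one.
 */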
1201
1202/* 32bit TXE */
1203#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1204CNTR_ELEM(#name, \
1205 (counter * 8 + SEND_COUNTER_ARRAY32), \
1206 0, flags | CNTR_32BIT, \
1207 port_access_u32_csr)
1208
1209/* 64bit TXE */
1210#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1211CNTR_ELEM(#name, \
1212 (counter * 8 + SEND_COUNTER_ARRAY64), \
1213 0, flags, \
1214 port_access_u64_csr)
1215
1216#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1217CNTR_ELEM(#name,\
1218 counter * 8 + SEND_COUNTER_ARRAY64, \
1219 0, \
1220 flags, \
1221 dev_access_u64_csr)
1222
1223/* CCE */
1224#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1225CNTR_ELEM(#name, \
1226 (counter * 8 + CCE_COUNTER_ARRAY32), \
1227 0, flags | CNTR_32BIT, \
1228 dev_access_u32_csr)
1229
1230#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1231CNTR_ELEM(#name, \
1232 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1233 0, flags | CNTR_32BIT, \
1234 dev_access_u32_csr)
1235
1236/* DC */
1237#define DC_PERF_CNTR(name, counter, flags) \
1238CNTR_ELEM(#name, \
1239 counter, \
1240 0, \
1241 flags, \
1242 dev_access_u64_csr)
1243
1244#define DC_PERF_CNTR_LCB(name, counter, flags) \
1245CNTR_ELEM(#name, \
1246 counter, \
1247 0, \
1248 flags, \
1249 dc_access_lcb_cntr)
1250
1251/* ibp counters */
1252#define SW_IBP_CNTR(name, cntr) \
1253CNTR_ELEM(#name, \
1254 0, \
1255 0, \
1256 CNTR_SYNTH, \
1257 access_ibp_##cntr)
1258
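/*
 * CSR access helpers. A read returns the 64-bit register value, or (u64)-1
 * (all ones, the pattern a read of an absent PCIe device would return) when
 * the device is not marked HFI1_PRESENT; writes to an absent device are
 * silently dropped.
 */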
1259u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1260{
1261 u64 val;
1262
1263 if (dd->flags & HFI1_PRESENT) {
1264 val = readq((void __iomem *)dd->kregbase + offset);
1265 return val;
1266 }
1267 return -1;
1268}
1269
1270void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1271{
1272 if (dd->flags & HFI1_PRESENT)
1273 writeq(value, (void __iomem *)dd->kregbase + offset);
1274}
1275
1276void __iomem *get_csr_addr(
1277 struct hfi1_devdata *dd,
1278 u32 offset)
1279{
1280 return (void __iomem *)dd->kregbase + offset;
1281}
1282
1283static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1284 int mode, u64 value)
1285{
1286 u64 ret;
1287
1288
1289 if (mode == CNTR_MODE_R) {
1290 ret = read_csr(dd, csr);
1291 } else if (mode == CNTR_MODE_W) {
1292 write_csr(dd, csr, value);
1293 ret = value;
1294 } else {
1295 dd_dev_err(dd, "Invalid cntr register access mode");
1296 return 0;
1297 }
1298
1299 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1300 return ret;
1301}
1302
1303/* Dev Access */
1304static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1305 void *context, int vl, int mode, u64 data)
1306{
1307	struct hfi1_devdata *dd = context;
1308	u64 csr = entry->csr;
1309
1310	if (entry->flags & CNTR_SDMA) {
1311 if (vl == CNTR_INVALID_VL)
1312 return 0;
1313 csr += 0x100 * vl;
1314 } else {
1315 if (vl != CNTR_INVALID_VL)
1316 return 0;
1317 }
1318 return read_write_csr(dd, csr, mode, data);
1319}
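/*
 * Note added for clarity: for CNTR_SDMA entries the "vl" argument is
 * reused as the SDMA engine index, and the per-engine copies of the
 * counter CSR are assumed to be 0x100 bytes apart (csr + 0x100 * vl),
 * matching the offset arithmetic above.  Non-SDMA device counters only
 * answer for CNTR_INVALID_VL.
 */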
1320
1321static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1322 void *context, int idx, int mode, u64 data)
1323{
1324 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1325
1326 if (dd->per_sdma && idx < dd->num_sdma)
1327 return dd->per_sdma[idx].err_cnt;
1328 return 0;
1329}
1330
1331static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1332 void *context, int idx, int mode, u64 data)
1333{
1334 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1335
1336 if (dd->per_sdma && idx < dd->num_sdma)
1337 return dd->per_sdma[idx].sdma_int_cnt;
1338 return 0;
1339}
1340
1341static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1342 void *context, int idx, int mode, u64 data)
1343{
1344 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1345
1346 if (dd->per_sdma && idx < dd->num_sdma)
1347 return dd->per_sdma[idx].idle_int_cnt;
1348 return 0;
1349}
1350
1351static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1352 void *context, int idx, int mode,
1353 u64 data)
1354{
1355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1356
1357 if (dd->per_sdma && idx < dd->num_sdma)
1358 return dd->per_sdma[idx].progress_int_cnt;
1359 return 0;
1360}
1361
1362static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1363 int vl, int mode, u64 data)
1364{
1365	struct hfi1_devdata *dd = context;
1366
1367 u64 val = 0;
1368 u64 csr = entry->csr;
1369
1370 if (entry->flags & CNTR_VL) {
1371 if (vl == CNTR_INVALID_VL)
1372 return 0;
1373 csr += 8 * vl;
1374 } else {
1375 if (vl != CNTR_INVALID_VL)
1376 return 0;
1377 }
1378
1379 val = read_write_csr(dd, csr, mode, data);
1380 return val;
1381}
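/*
 * Note added for clarity: per-VL device counters are one 64-bit CSR per
 * VL, so the VL copies sit 8 bytes apart (csr + 8 * vl), as computed
 * above.  Counters without CNTR_VL only answer for CNTR_INVALID_VL.
 */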
1382
1383static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1384 int vl, int mode, u64 data)
1385{
1386	struct hfi1_devdata *dd = context;
1387	u32 csr = entry->csr;
1388 int ret = 0;
1389
1390 if (vl != CNTR_INVALID_VL)
1391 return 0;
1392 if (mode == CNTR_MODE_R)
1393 ret = read_lcb_csr(dd, csr, &data);
1394 else if (mode == CNTR_MODE_W)
1395 ret = write_lcb_csr(dd, csr, data);
1396
1397 if (ret) {
1398 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1399 return 0;
1400 }
1401
1402 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1403 return data;
1404}
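/*
 * Note added for clarity: LCB counters are not touched through the
 * normal CSR path; read_lcb_csr()/write_lcb_csr() may fail if the LCB
 * is not currently accessible, in which case the counter is reported
 * as 0 and an error is logged.
 */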
1405
1406/* Port Access */
1407static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1408 int vl, int mode, u64 data)
1409{
1410	struct hfi1_pportdata *ppd = context;
1411
1412 if (vl != CNTR_INVALID_VL)
1413 return 0;
1414 return read_write_csr(ppd->dd, entry->csr, mode, data);
1415}
1416
1417static u64 port_access_u64_csr(const struct cntr_entry *entry,
1418 void *context, int vl, int mode, u64 data)
1419{
1420	struct hfi1_pportdata *ppd = context;
1421	u64 val;
1422 u64 csr = entry->csr;
1423
1424 if (entry->flags & CNTR_VL) {
1425 if (vl == CNTR_INVALID_VL)
1426 return 0;
1427 csr += 8 * vl;
1428 } else {
1429 if (vl != CNTR_INVALID_VL)
1430 return 0;
1431 }
1432 val = read_write_csr(ppd->dd, csr, mode, data);
1433 return val;
1434}
1435
1436/* Software defined */
1437static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1438 u64 data)
1439{
1440 u64 ret;
1441
1442 if (mode == CNTR_MODE_R) {
1443 ret = *cntr;
1444 } else if (mode == CNTR_MODE_W) {
1445 *cntr = data;
1446 ret = data;
1447 } else {
1448 dd_dev_err(dd, "Invalid cntr sw access mode");
1449 return 0;
1450 }
1451
1452 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1453
1454 return ret;
1455}
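/*
 * Illustrative usage (added; not in the original source): software
 * counters are plain u64 fields, so an accessor simply passes the
 * address of the field it exposes, e.g.
 *
 *	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
 *
 * as the per-port accessors below do.
 */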
1456
1457static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1458 int vl, int mode, u64 data)
1459{
1460	struct hfi1_pportdata *ppd = context;
1461
1462 if (vl != CNTR_INVALID_VL)
1463 return 0;
1464 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1465}
1466
1467static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1468 int vl, int mode, u64 data)
1469{
1470	struct hfi1_pportdata *ppd = context;
1471
1472 if (vl != CNTR_INVALID_VL)
1473 return 0;
1474 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1475}
1476
1477static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1478 void *context, int vl, int mode,
1479 u64 data)
1480{
1481 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1482
1483 if (vl != CNTR_INVALID_VL)
1484 return 0;
1485 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1486}
1487
1488static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1489 void *context, int vl, int mode, u64 data)
1490{
1491	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1492 u64 zero = 0;
1493 u64 *counter;
1494
1495	if (vl == CNTR_INVALID_VL)
1496 counter = &ppd->port_xmit_discards;
1497 else if (vl >= 0 && vl < C_VL_COUNT)
1498 counter = &ppd->port_xmit_discards_vl[vl];
1499 else
1500 counter = &zero;
1501
1502	return read_write_sw(ppd->dd, counter, mode, data);
1503}
1504
1505static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1506 void *context, int vl, int mode, u64 data)
1507{
1508	struct hfi1_pportdata *ppd = context;
1509
1510 if (vl != CNTR_INVALID_VL)
1511 return 0;
1512
1513 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1514 mode, data);
1515}
1516
1517static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1518 void *context, int vl, int mode, u64 data)
1519{
1520	struct hfi1_pportdata *ppd = context;
1521
1522 if (vl != CNTR_INVALID_VL)
1523 return 0;
1524
1525 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1526 mode, data);
1527}
1528
1529u64 get_all_cpu_total(u64 __percpu *cntr)
1530{
1531 int cpu;
1532 u64 counter = 0;
1533
1534 for_each_possible_cpu(cpu)
1535 counter += *per_cpu_ptr(cntr, cpu);
1536 return counter;
1537}
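/*
 * Note added for clarity: per-CPU counters are summed over every
 * possible CPU, so the total includes contributions counted on CPUs
 * that have since gone offline.
 */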
1538
1539static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1540 u64 __percpu *cntr,
1541 int vl, int mode, u64 data)
1542{
1543
1544 u64 ret = 0;
1545
1546 if (vl != CNTR_INVALID_VL)
1547 return 0;
1548
1549 if (mode == CNTR_MODE_R) {
1550 ret = get_all_cpu_total(cntr) - *z_val;
1551 } else if (mode == CNTR_MODE_W) {
1552 /* A write can only zero the counter */
1553 if (data == 0)
1554 *z_val = get_all_cpu_total(cntr);
1555 else
1556 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1557 } else {
1558 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1559 return 0;
1560 }
1561
1562 return ret;
1563}
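/*
 * Worked example (added for clarity): the value reported is the per-CPU
 * total minus the zero-point snapshot *z_val.  If the summed total is
 * 150 and *z_val is 100, a read returns 50; writing 0 snapshots the
 * total (*z_val = 150) so the counter reads as 0 again.  Writes of any
 * non-zero value are rejected.
 */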
1564
1565static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1566 void *context, int vl, int mode, u64 data)
1567{
1568	struct hfi1_devdata *dd = context;
1569
1570 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1571 mode, data);
1572}
1573
1574static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1575 void *context, int vl, int mode, u64 data)
1576{
1577	struct hfi1_devdata *dd = context;
1578
1579 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1580 mode, data);
1581}
1582
1583static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1584 void *context, int vl, int mode, u64 data)
1585{
1586	struct hfi1_devdata *dd = context;
1587
1588 return dd->verbs_dev.n_piowait;
1589}
1590
1591static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1592 void *context, int vl, int mode, u64 data)
1593{
1594 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1595
1596 return dd->verbs_dev.n_piodrain;
1597}
1598
1599static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1600 void *context, int vl, int mode, u64 data)
1601{
1602	struct hfi1_devdata *dd = context;
1603
1604 return dd->verbs_dev.n_txwait;
1605}
1606
1607static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1608 void *context, int vl, int mode, u64 data)
1609{
1610	struct hfi1_devdata *dd = context;
1611
1612 return dd->verbs_dev.n_kmem_wait;
1613}
1614
1615static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1616 void *context, int vl, int mode, u64 data)
1617{
1618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1619
1620	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1621 mode, data);
1622}
1623
1624/* Software counters for the error status bits within MISC_ERR_STATUS */
1625static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1626 void *context, int vl, int mode,
1627 u64 data)
1628{
1629 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1630
1631 return dd->misc_err_status_cnt[12];
1632}
1633
1634static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1635 void *context, int vl, int mode,
1636 u64 data)
1637{
1638 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1639
1640 return dd->misc_err_status_cnt[11];
1641}
1642
1643static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1644 void *context, int vl, int mode,
1645 u64 data)
1646{
1647 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1648
1649 return dd->misc_err_status_cnt[10];
1650}
1651
1652static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1653 void *context, int vl,
1654 int mode, u64 data)
1655{
1656 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1657
1658 return dd->misc_err_status_cnt[9];
1659}
1660
1661static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1662 void *context, int vl, int mode,
1663 u64 data)
1664{
1665 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1666
1667 return dd->misc_err_status_cnt[8];
1668}
1669
1670static u64 access_misc_efuse_read_bad_addr_err_cnt(
1671 const struct cntr_entry *entry,
1672 void *context, int vl, int mode, u64 data)
1673{
1674 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1675
1676 return dd->misc_err_status_cnt[7];
1677}
1678
1679static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1680 void *context, int vl,
1681 int mode, u64 data)
1682{
1683 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684
1685 return dd->misc_err_status_cnt[6];
1686}
1687
1688static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1689 void *context, int vl, int mode,
1690 u64 data)
1691{
1692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1693
1694 return dd->misc_err_status_cnt[5];
1695}
1696
1697static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1698 void *context, int vl, int mode,
1699 u64 data)
1700{
1701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1702
1703 return dd->misc_err_status_cnt[4];
1704}
1705
1706static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1707 void *context, int vl,
1708 int mode, u64 data)
1709{
1710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1711
1712 return dd->misc_err_status_cnt[3];
1713}
1714
1715static u64 access_misc_csr_write_bad_addr_err_cnt(
1716 const struct cntr_entry *entry,
1717 void *context, int vl, int mode, u64 data)
1718{
1719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1720
1721 return dd->misc_err_status_cnt[2];
1722}
1723
1724static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1725 void *context, int vl,
1726 int mode, u64 data)
1727{
1728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1729
1730 return dd->misc_err_status_cnt[1];
1731}
1732
1733static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1734 void *context, int vl, int mode,
1735 u64 data)
1736{
1737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1738
1739 return dd->misc_err_status_cnt[0];
1740}
1741
1742/*
1743 * Software counter for the aggregate of
1744 * individual CceErrStatus counters
1745 */
1746static u64 access_sw_cce_err_status_aggregated_cnt(
1747 const struct cntr_entry *entry,
1748 void *context, int vl, int mode, u64 data)
1749{
1750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1751
1752 return dd->sw_cce_err_status_aggregate;
1753}
1754
1755/*
1756 * Software counters corresponding to each of the
1757 * error status bits within CceErrStatus
1758 */
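/*
 * Note added for clarity: in the accessors below the array index appears
 * to mirror the bit position in CceErrStatus, e.g. cce_err_status_cnt[40]
 * backs bit 40 and cce_err_status_cnt[0] backs bit 0.  The same
 * one-counter-per-status-bit pattern repeats for the other error status
 * registers further down.
 */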
1759static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1760 void *context, int vl, int mode,
1761 u64 data)
1762{
1763 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1764
1765 return dd->cce_err_status_cnt[40];
1766}
1767
1768static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1769 void *context, int vl, int mode,
1770 u64 data)
1771{
1772 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1773
1774 return dd->cce_err_status_cnt[39];
1775}
1776
1777static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1778 void *context, int vl, int mode,
1779 u64 data)
1780{
1781 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1782
1783 return dd->cce_err_status_cnt[38];
1784}
1785
1786static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1787 void *context, int vl, int mode,
1788 u64 data)
1789{
1790 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1791
1792 return dd->cce_err_status_cnt[37];
1793}
1794
1795static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1796 void *context, int vl, int mode,
1797 u64 data)
1798{
1799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800
1801 return dd->cce_err_status_cnt[36];
1802}
1803
1804static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1805 const struct cntr_entry *entry,
1806 void *context, int vl, int mode, u64 data)
1807{
1808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809
1810 return dd->cce_err_status_cnt[35];
1811}
1812
1813static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1814 const struct cntr_entry *entry,
1815 void *context, int vl, int mode, u64 data)
1816{
1817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818
1819 return dd->cce_err_status_cnt[34];
1820}
1821
1822static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1823 void *context, int vl,
1824 int mode, u64 data)
1825{
1826 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827
1828 return dd->cce_err_status_cnt[33];
1829}
1830
1831static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1832 void *context, int vl, int mode,
1833 u64 data)
1834{
1835 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1836
1837 return dd->cce_err_status_cnt[32];
1838}
1839
1840static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1841 void *context, int vl, int mode, u64 data)
1842{
1843 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1844
1845 return dd->cce_err_status_cnt[31];
1846}
1847
1848static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1849 void *context, int vl, int mode,
1850 u64 data)
1851{
1852 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1853
1854 return dd->cce_err_status_cnt[30];
1855}
1856
1857static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1858 void *context, int vl, int mode,
1859 u64 data)
1860{
1861 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1862
1863 return dd->cce_err_status_cnt[29];
1864}
1865
1866static u64 access_pcic_transmit_back_parity_err_cnt(
1867 const struct cntr_entry *entry,
1868 void *context, int vl, int mode, u64 data)
1869{
1870 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1871
1872 return dd->cce_err_status_cnt[28];
1873}
1874
1875static u64 access_pcic_transmit_front_parity_err_cnt(
1876 const struct cntr_entry *entry,
1877 void *context, int vl, int mode, u64 data)
1878{
1879 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1880
1881 return dd->cce_err_status_cnt[27];
1882}
1883
1884static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1885 void *context, int vl, int mode,
1886 u64 data)
1887{
1888 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1889
1890 return dd->cce_err_status_cnt[26];
1891}
1892
1893static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1894 void *context, int vl, int mode,
1895 u64 data)
1896{
1897 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1898
1899 return dd->cce_err_status_cnt[25];
1900}
1901
1902static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1903 void *context, int vl, int mode,
1904 u64 data)
1905{
1906 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1907
1908 return dd->cce_err_status_cnt[24];
1909}
1910
1911static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1912 void *context, int vl, int mode,
1913 u64 data)
1914{
1915 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1916
1917 return dd->cce_err_status_cnt[23];
1918}
1919
1920static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1921 void *context, int vl,
1922 int mode, u64 data)
1923{
1924 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1925
1926 return dd->cce_err_status_cnt[22];
1927}
1928
1929static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1930 void *context, int vl, int mode,
1931 u64 data)
1932{
1933 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1934
1935 return dd->cce_err_status_cnt[21];
1936}
1937
1938static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1939 const struct cntr_entry *entry,
1940 void *context, int vl, int mode, u64 data)
1941{
1942 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1943
1944 return dd->cce_err_status_cnt[20];
1945}
1946
1947static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1948 void *context, int vl,
1949 int mode, u64 data)
1950{
1951 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1952
1953 return dd->cce_err_status_cnt[19];
1954}
1955
1956static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1957 void *context, int vl, int mode,
1958 u64 data)
1959{
1960 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1961
1962 return dd->cce_err_status_cnt[18];
1963}
1964
1965static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1966 void *context, int vl, int mode,
1967 u64 data)
1968{
1969 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1970
1971 return dd->cce_err_status_cnt[17];
1972}
1973
1974static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1975 void *context, int vl, int mode,
1976 u64 data)
1977{
1978 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1979
1980 return dd->cce_err_status_cnt[16];
1981}
1982
1983static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1984 void *context, int vl, int mode,
1985 u64 data)
1986{
1987 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1988
1989 return dd->cce_err_status_cnt[15];
1990}
1991
1992static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1993 void *context, int vl,
1994 int mode, u64 data)
1995{
1996 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1997
1998 return dd->cce_err_status_cnt[14];
1999}
2000
2001static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2002 void *context, int vl, int mode,
2003 u64 data)
2004{
2005 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2006
2007 return dd->cce_err_status_cnt[13];
2008}
2009
2010static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2011 const struct cntr_entry *entry,
2012 void *context, int vl, int mode, u64 data)
2013{
2014 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2015
2016 return dd->cce_err_status_cnt[12];
2017}
2018
2019static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2020 const struct cntr_entry *entry,
2021 void *context, int vl, int mode, u64 data)
2022{
2023 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2024
2025 return dd->cce_err_status_cnt[11];
2026}
2027
2028static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2029 const struct cntr_entry *entry,
2030 void *context, int vl, int mode, u64 data)
2031{
2032 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2033
2034 return dd->cce_err_status_cnt[10];
2035}
2036
2037static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2038 const struct cntr_entry *entry,
2039 void *context, int vl, int mode, u64 data)
2040{
2041 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2042
2043 return dd->cce_err_status_cnt[9];
2044}
2045
2046static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2047 const struct cntr_entry *entry,
2048 void *context, int vl, int mode, u64 data)
2049{
2050 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2051
2052 return dd->cce_err_status_cnt[8];
2053}
2054
2055static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2056 void *context, int vl,
2057 int mode, u64 data)
2058{
2059 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2060
2061 return dd->cce_err_status_cnt[7];
2062}
2063
2064static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2065 const struct cntr_entry *entry,
2066 void *context, int vl, int mode, u64 data)
2067{
2068 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2069
2070 return dd->cce_err_status_cnt[6];
2071}
2072
2073static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2074 void *context, int vl, int mode,
2075 u64 data)
2076{
2077 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2078
2079 return dd->cce_err_status_cnt[5];
2080}
2081
2082static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2083 void *context, int vl, int mode,
2084 u64 data)
2085{
2086 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2087
2088 return dd->cce_err_status_cnt[4];
2089}
2090
2091static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2092 const struct cntr_entry *entry,
2093 void *context, int vl, int mode, u64 data)
2094{
2095 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2096
2097 return dd->cce_err_status_cnt[3];
2098}
2099
2100static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2101 void *context, int vl,
2102 int mode, u64 data)
2103{
2104 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2105
2106 return dd->cce_err_status_cnt[2];
2107}
2108
2109static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2110 void *context, int vl,
2111 int mode, u64 data)
2112{
2113 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2114
2115 return dd->cce_err_status_cnt[1];
2116}
2117
2118static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2119 void *context, int vl, int mode,
2120 u64 data)
2121{
2122 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2123
2124 return dd->cce_err_status_cnt[0];
2125}
2126
2127/*
2128 * Software counters corresponding to each of the
2129 * error status bits within RcvErrStatus
2130 */
2131static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2132 void *context, int vl, int mode,
2133 u64 data)
2134{
2135 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2136
2137 return dd->rcv_err_status_cnt[63];
2138}
2139
2140static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2141 void *context, int vl,
2142 int mode, u64 data)
2143{
2144 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2145
2146 return dd->rcv_err_status_cnt[62];
2147}
2148
2149static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2150 void *context, int vl, int mode,
2151 u64 data)
2152{
2153 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2154
2155 return dd->rcv_err_status_cnt[61];
2156}
2157
2158static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2159 void *context, int vl, int mode,
2160 u64 data)
2161{
2162 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2163
2164 return dd->rcv_err_status_cnt[60];
2165}
2166
2167static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2168 void *context, int vl,
2169 int mode, u64 data)
2170{
2171 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2172
2173 return dd->rcv_err_status_cnt[59];
2174}
2175
2176static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2177 void *context, int vl,
2178 int mode, u64 data)
2179{
2180 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2181
2182 return dd->rcv_err_status_cnt[58];
2183}
2184
2185static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2186 void *context, int vl, int mode,
2187 u64 data)
2188{
2189 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2190
2191 return dd->rcv_err_status_cnt[57];
2192}
2193
2194static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2195 void *context, int vl, int mode,
2196 u64 data)
2197{
2198 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2199
2200 return dd->rcv_err_status_cnt[56];
2201}
2202
2203static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2204 void *context, int vl, int mode,
2205 u64 data)
2206{
2207 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2208
2209 return dd->rcv_err_status_cnt[55];
2210}
2211
2212static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2213 const struct cntr_entry *entry,
2214 void *context, int vl, int mode, u64 data)
2215{
2216 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2217
2218 return dd->rcv_err_status_cnt[54];
2219}
2220
2221static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2222 const struct cntr_entry *entry,
2223 void *context, int vl, int mode, u64 data)
2224{
2225 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2226
2227 return dd->rcv_err_status_cnt[53];
2228}
2229
2230static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2231 void *context, int vl,
2232 int mode, u64 data)
2233{
2234 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2235
2236 return dd->rcv_err_status_cnt[52];
2237}
2238
2239static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2240 void *context, int vl,
2241 int mode, u64 data)
2242{
2243 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2244
2245 return dd->rcv_err_status_cnt[51];
2246}
2247
2248static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2249 void *context, int vl,
2250 int mode, u64 data)
2251{
2252 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2253
2254 return dd->rcv_err_status_cnt[50];
2255}
2256
2257static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2258 void *context, int vl,
2259 int mode, u64 data)
2260{
2261 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2262
2263 return dd->rcv_err_status_cnt[49];
2264}
2265
2266static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2267 void *context, int vl,
2268 int mode, u64 data)
2269{
2270 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2271
2272 return dd->rcv_err_status_cnt[48];
2273}
2274
2275static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2276 void *context, int vl,
2277 int mode, u64 data)
2278{
2279 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2280
2281 return dd->rcv_err_status_cnt[47];
2282}
2283
2284static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2285 void *context, int vl, int mode,
2286 u64 data)
2287{
2288 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2289
2290 return dd->rcv_err_status_cnt[46];
2291}
2292
2293static u64 access_rx_hq_intr_csr_parity_err_cnt(
2294 const struct cntr_entry *entry,
2295 void *context, int vl, int mode, u64 data)
2296{
2297 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2298
2299 return dd->rcv_err_status_cnt[45];
2300}
2301
2302static u64 access_rx_lookup_csr_parity_err_cnt(
2303 const struct cntr_entry *entry,
2304 void *context, int vl, int mode, u64 data)
2305{
2306 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2307
2308 return dd->rcv_err_status_cnt[44];
2309}
2310
2311static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2312 const struct cntr_entry *entry,
2313 void *context, int vl, int mode, u64 data)
2314{
2315 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2316
2317 return dd->rcv_err_status_cnt[43];
2318}
2319
2320static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2321 const struct cntr_entry *entry,
2322 void *context, int vl, int mode, u64 data)
2323{
2324 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2325
2326 return dd->rcv_err_status_cnt[42];
2327}
2328
2329static u64 access_rx_lookup_des_part2_parity_err_cnt(
2330 const struct cntr_entry *entry,
2331 void *context, int vl, int mode, u64 data)
2332{
2333 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2334
2335 return dd->rcv_err_status_cnt[41];
2336}
2337
2338static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2339 const struct cntr_entry *entry,
2340 void *context, int vl, int mode, u64 data)
2341{
2342 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2343
2344 return dd->rcv_err_status_cnt[40];
2345}
2346
2347static u64 access_rx_lookup_des_part1_unc_err_cnt(
2348 const struct cntr_entry *entry,
2349 void *context, int vl, int mode, u64 data)
2350{
2351 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2352
2353 return dd->rcv_err_status_cnt[39];
2354}
2355
2356static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2357 const struct cntr_entry *entry,
2358 void *context, int vl, int mode, u64 data)
2359{
2360 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2361
2362 return dd->rcv_err_status_cnt[38];
2363}
2364
2365static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2366 const struct cntr_entry *entry,
2367 void *context, int vl, int mode, u64 data)
2368{
2369 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2370
2371 return dd->rcv_err_status_cnt[37];
2372}
2373
2374static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2375 const struct cntr_entry *entry,
2376 void *context, int vl, int mode, u64 data)
2377{
2378 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2379
2380 return dd->rcv_err_status_cnt[36];
2381}
2382
2383static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2384 const struct cntr_entry *entry,
2385 void *context, int vl, int mode, u64 data)
2386{
2387 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2388
2389 return dd->rcv_err_status_cnt[35];
2390}
2391
2392static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2393 const struct cntr_entry *entry,
2394 void *context, int vl, int mode, u64 data)
2395{
2396 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2397
2398 return dd->rcv_err_status_cnt[34];
2399}
2400
2401static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2402 const struct cntr_entry *entry,
2403 void *context, int vl, int mode, u64 data)
2404{
2405 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2406
2407 return dd->rcv_err_status_cnt[33];
2408}
2409
2410static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2411 void *context, int vl, int mode,
2412 u64 data)
2413{
2414 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2415
2416 return dd->rcv_err_status_cnt[32];
2417}
2418
2419static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2420 void *context, int vl, int mode,
2421 u64 data)
2422{
2423 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2424
2425 return dd->rcv_err_status_cnt[31];
2426}
2427
2428static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2429 void *context, int vl, int mode,
2430 u64 data)
2431{
2432 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2433
2434 return dd->rcv_err_status_cnt[30];
2435}
2436
2437static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2438 void *context, int vl, int mode,
2439 u64 data)
2440{
2441 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2442
2443 return dd->rcv_err_status_cnt[29];
2444}
2445
2446static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2447 void *context, int vl,
2448 int mode, u64 data)
2449{
2450 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2451
2452 return dd->rcv_err_status_cnt[28];
2453}
2454
2455static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2456 const struct cntr_entry *entry,
2457 void *context, int vl, int mode, u64 data)
2458{
2459 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2460
2461 return dd->rcv_err_status_cnt[27];
2462}
2463
2464static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2465 const struct cntr_entry *entry,
2466 void *context, int vl, int mode, u64 data)
2467{
2468 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2469
2470 return dd->rcv_err_status_cnt[26];
2471}
2472
2473static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2474 const struct cntr_entry *entry,
2475 void *context, int vl, int mode, u64 data)
2476{
2477 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2478
2479 return dd->rcv_err_status_cnt[25];
2480}
2481
2482static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2483 const struct cntr_entry *entry,
2484 void *context, int vl, int mode, u64 data)
2485{
2486 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2487
2488 return dd->rcv_err_status_cnt[24];
2489}
2490
2491static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2492 const struct cntr_entry *entry,
2493 void *context, int vl, int mode, u64 data)
2494{
2495 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2496
2497 return dd->rcv_err_status_cnt[23];
2498}
2499
2500static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2501 const struct cntr_entry *entry,
2502 void *context, int vl, int mode, u64 data)
2503{
2504 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2505
2506 return dd->rcv_err_status_cnt[22];
2507}
2508
2509static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2510 const struct cntr_entry *entry,
2511 void *context, int vl, int mode, u64 data)
2512{
2513 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2514
2515 return dd->rcv_err_status_cnt[21];
2516}
2517
2518static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2519 const struct cntr_entry *entry,
2520 void *context, int vl, int mode, u64 data)
2521{
2522 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2523
2524 return dd->rcv_err_status_cnt[20];
2525}
2526
2527static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2528 const struct cntr_entry *entry,
2529 void *context, int vl, int mode, u64 data)
2530{
2531 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2532
2533 return dd->rcv_err_status_cnt[19];
2534}
2535
2536static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2537 void *context, int vl,
2538 int mode, u64 data)
2539{
2540 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2541
2542 return dd->rcv_err_status_cnt[18];
2543}
2544
2545static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2546 void *context, int vl,
2547 int mode, u64 data)
2548{
2549 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2550
2551 return dd->rcv_err_status_cnt[17];
2552}
2553
2554static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2555 const struct cntr_entry *entry,
2556 void *context, int vl, int mode, u64 data)
2557{
2558 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2559
2560 return dd->rcv_err_status_cnt[16];
2561}
2562
2563static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2564 const struct cntr_entry *entry,
2565 void *context, int vl, int mode, u64 data)
2566{
2567 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2568
2569 return dd->rcv_err_status_cnt[15];
2570}
2571
2572static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2573 void *context, int vl,
2574 int mode, u64 data)
2575{
2576 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2577
2578 return dd->rcv_err_status_cnt[14];
2579}
2580
2581static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2582 void *context, int vl,
2583 int mode, u64 data)
2584{
2585 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2586
2587 return dd->rcv_err_status_cnt[13];
2588}
2589
2590static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2591 void *context, int vl, int mode,
2592 u64 data)
2593{
2594 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2595
2596 return dd->rcv_err_status_cnt[12];
2597}
2598
2599static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2600 void *context, int vl, int mode,
2601 u64 data)
2602{
2603 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2604
2605 return dd->rcv_err_status_cnt[11];
2606}
2607
2608static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2609 void *context, int vl, int mode,
2610 u64 data)
2611{
2612 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2613
2614 return dd->rcv_err_status_cnt[10];
2615}
2616
2617static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2618 void *context, int vl, int mode,
2619 u64 data)
2620{
2621 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2622
2623 return dd->rcv_err_status_cnt[9];
2624}
2625
2626static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2627 void *context, int vl, int mode,
2628 u64 data)
2629{
2630 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2631
2632 return dd->rcv_err_status_cnt[8];
2633}
2634
2635static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2636 const struct cntr_entry *entry,
2637 void *context, int vl, int mode, u64 data)
2638{
2639 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2640
2641 return dd->rcv_err_status_cnt[7];
2642}
2643
2644static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2645 const struct cntr_entry *entry,
2646 void *context, int vl, int mode, u64 data)
2647{
2648 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2649
2650 return dd->rcv_err_status_cnt[6];
2651}
2652
2653static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2654 void *context, int vl, int mode,
2655 u64 data)
2656{
2657 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2658
2659 return dd->rcv_err_status_cnt[5];
2660}
2661
2662static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2663 void *context, int vl, int mode,
2664 u64 data)
2665{
2666 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2667
2668 return dd->rcv_err_status_cnt[4];
2669}
2670
2671static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2672 void *context, int vl, int mode,
2673 u64 data)
2674{
2675 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2676
2677 return dd->rcv_err_status_cnt[3];
2678}
2679
2680static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2681 void *context, int vl, int mode,
2682 u64 data)
2683{
2684 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2685
2686 return dd->rcv_err_status_cnt[2];
2687}
2688
2689static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2690 void *context, int vl, int mode,
2691 u64 data)
2692{
2693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2694
2695 return dd->rcv_err_status_cnt[1];
2696}
2697
2698static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2699 void *context, int vl, int mode,
2700 u64 data)
2701{
2702 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2703
2704 return dd->rcv_err_status_cnt[0];
2705}
2706
2707/*
2708 * Software counters corresponding to each of the
2709 * error status bits within SendPioErrStatus
2710 */
2711static u64 access_pio_pec_sop_head_parity_err_cnt(
2712 const struct cntr_entry *entry,
2713 void *context, int vl, int mode, u64 data)
2714{
2715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2716
2717 return dd->send_pio_err_status_cnt[35];
2718}
2719
2720static u64 access_pio_pcc_sop_head_parity_err_cnt(
2721 const struct cntr_entry *entry,
2722 void *context, int vl, int mode, u64 data)
2723{
2724 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2725
2726 return dd->send_pio_err_status_cnt[34];
2727}
2728
2729static u64 access_pio_last_returned_cnt_parity_err_cnt(
2730 const struct cntr_entry *entry,
2731 void *context, int vl, int mode, u64 data)
2732{
2733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2734
2735 return dd->send_pio_err_status_cnt[33];
2736}
2737
2738static u64 access_pio_current_free_cnt_parity_err_cnt(
2739 const struct cntr_entry *entry,
2740 void *context, int vl, int mode, u64 data)
2741{
2742 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2743
2744 return dd->send_pio_err_status_cnt[32];
2745}
2746
2747static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2748 void *context, int vl, int mode,
2749 u64 data)
2750{
2751 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2752
2753 return dd->send_pio_err_status_cnt[31];
2754}
2755
2756static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2757 void *context, int vl, int mode,
2758 u64 data)
2759{
2760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2761
2762 return dd->send_pio_err_status_cnt[30];
2763}
2764
2765static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2766 void *context, int vl, int mode,
2767 u64 data)
2768{
2769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2770
2771 return dd->send_pio_err_status_cnt[29];
2772}
2773
2774static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2775 const struct cntr_entry *entry,
2776 void *context, int vl, int mode, u64 data)
2777{
2778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2779
2780 return dd->send_pio_err_status_cnt[28];
2781}
2782
2783static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2784 void *context, int vl, int mode,
2785 u64 data)
2786{
2787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2788
2789 return dd->send_pio_err_status_cnt[27];
2790}
2791
2792static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2793 void *context, int vl, int mode,
2794 u64 data)
2795{
2796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2797
2798 return dd->send_pio_err_status_cnt[26];
2799}
2800
2801static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2802 void *context, int vl,
2803 int mode, u64 data)
2804{
2805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2806
2807 return dd->send_pio_err_status_cnt[25];
2808}
2809
2810static u64 access_pio_block_qw_count_parity_err_cnt(
2811 const struct cntr_entry *entry,
2812 void *context, int vl, int mode, u64 data)
2813{
2814 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2815
2816 return dd->send_pio_err_status_cnt[24];
2817}
2818
2819static u64 access_pio_write_qw_valid_parity_err_cnt(
2820 const struct cntr_entry *entry,
2821 void *context, int vl, int mode, u64 data)
2822{
2823 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2824
2825 return dd->send_pio_err_status_cnt[23];
2826}
2827
2828static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2829 void *context, int vl, int mode,
2830 u64 data)
2831{
2832 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2833
2834 return dd->send_pio_err_status_cnt[22];
2835}
2836
2837static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2838 void *context, int vl,
2839 int mode, u64 data)
2840{
2841 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2842
2843 return dd->send_pio_err_status_cnt[21];
2844}
2845
2846static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2847 void *context, int vl,
2848 int mode, u64 data)
2849{
2850 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2851
2852 return dd->send_pio_err_status_cnt[20];
2853}
2854
2855static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2856 void *context, int vl,
2857 int mode, u64 data)
2858{
2859 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2860
2861 return dd->send_pio_err_status_cnt[19];
2862}
2863
2864static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2865 const struct cntr_entry *entry,
2866 void *context, int vl, int mode, u64 data)
2867{
2868 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2869
2870 return dd->send_pio_err_status_cnt[18];
2871}
2872
2873static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2874 void *context, int vl, int mode,
2875 u64 data)
2876{
2877 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2878
2879 return dd->send_pio_err_status_cnt[17];
2880}
2881
2882static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2883 void *context, int vl, int mode,
2884 u64 data)
2885{
2886 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2887
2888 return dd->send_pio_err_status_cnt[16];
2889}
2890
2891static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2892 const struct cntr_entry *entry,
2893 void *context, int vl, int mode, u64 data)
2894{
2895 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2896
2897 return dd->send_pio_err_status_cnt[15];
2898}
2899
2900static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2901 const struct cntr_entry *entry,
2902 void *context, int vl, int mode, u64 data)
2903{
2904 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2905
2906 return dd->send_pio_err_status_cnt[14];
2907}
2908
2909static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2910 const struct cntr_entry *entry,
2911 void *context, int vl, int mode, u64 data)
2912{
2913 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2914
2915 return dd->send_pio_err_status_cnt[13];
2916}
2917
2918static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2919 const struct cntr_entry *entry,
2920 void *context, int vl, int mode, u64 data)
2921{
2922 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2923
2924 return dd->send_pio_err_status_cnt[12];
2925}
2926
2927static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2928 const struct cntr_entry *entry,
2929 void *context, int vl, int mode, u64 data)
2930{
2931 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2932
2933 return dd->send_pio_err_status_cnt[11];
2934}
2935
2936static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2937 const struct cntr_entry *entry,
2938 void *context, int vl, int mode, u64 data)
2939{
2940 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2941
2942 return dd->send_pio_err_status_cnt[10];
2943}
2944
2945static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2946 const struct cntr_entry *entry,
2947 void *context, int vl, int mode, u64 data)
2948{
2949 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2950
2951 return dd->send_pio_err_status_cnt[9];
2952}
2953
2954static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2955 const struct cntr_entry *entry,
2956 void *context, int vl, int mode, u64 data)
2957{
2958 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2959
2960 return dd->send_pio_err_status_cnt[8];
2961}
2962
2963static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2964 const struct cntr_entry *entry,
2965 void *context, int vl, int mode, u64 data)
2966{
2967 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2968
2969 return dd->send_pio_err_status_cnt[7];
2970}
2971
2972static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2973 void *context, int vl, int mode,
2974 u64 data)
2975{
2976 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2977
2978 return dd->send_pio_err_status_cnt[6];
2979}
2980
2981static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2982 void *context, int vl, int mode,
2983 u64 data)
2984{
2985 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2986
2987 return dd->send_pio_err_status_cnt[5];
2988}
2989
2990static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2991 void *context, int vl, int mode,
2992 u64 data)
2993{
2994 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2995
2996 return dd->send_pio_err_status_cnt[4];
2997}
2998
2999static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3000 void *context, int vl, int mode,
3001 u64 data)
3002{
3003 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3004
3005 return dd->send_pio_err_status_cnt[3];
3006}
3007
3008static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3009 void *context, int vl, int mode,
3010 u64 data)
3011{
3012 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3013
3014 return dd->send_pio_err_status_cnt[2];
3015}
3016
3017static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3018 void *context, int vl,
3019 int mode, u64 data)
3020{
3021 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3022
3023 return dd->send_pio_err_status_cnt[1];
3024}
3025
3026static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3027 void *context, int vl, int mode,
3028 u64 data)
3029{
3030 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3031
3032 return dd->send_pio_err_status_cnt[0];
3033}
3034
3035/*
3036 * Software counters corresponding to each of the
3037 * error status bits within SendDmaErrStatus
3038 */
3039static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3040 const struct cntr_entry *entry,
3041 void *context, int vl, int mode, u64 data)
3042{
3043 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3044
3045 return dd->send_dma_err_status_cnt[3];
3046}
3047
3048static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3049 const struct cntr_entry *entry,
3050 void *context, int vl, int mode, u64 data)
3051{
3052 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3053
3054 return dd->send_dma_err_status_cnt[2];
3055}
3056
3057static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3058 void *context, int vl, int mode,
3059 u64 data)
3060{
3061 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3062
3063 return dd->send_dma_err_status_cnt[1];
3064}
3065
3066static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3067 void *context, int vl, int mode,
3068 u64 data)
3069{
3070 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3071
3072 return dd->send_dma_err_status_cnt[0];
3073}
3074
3075/*
3076 * Software counters corresponding to each of the
3077 * error status bits within SendEgressErrStatus
3078 */
3079static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3080 const struct cntr_entry *entry,
3081 void *context, int vl, int mode, u64 data)
3082{
3083 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3084
3085 return dd->send_egress_err_status_cnt[63];
3086}
3087
3088static u64 access_tx_read_sdma_memory_csr_err_cnt(
3089 const struct cntr_entry *entry,
3090 void *context, int vl, int mode, u64 data)
3091{
3092 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3093
3094 return dd->send_egress_err_status_cnt[62];
3095}
3096
3097static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3098 void *context, int vl, int mode,
3099 u64 data)
3100{
3101 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3102
3103 return dd->send_egress_err_status_cnt[61];
3104}
3105
3106static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3107 void *context, int vl,
3108 int mode, u64 data)
3109{
3110 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3111
3112 return dd->send_egress_err_status_cnt[60];
3113}
3114
3115static u64 access_tx_read_sdma_memory_cor_err_cnt(
3116 const struct cntr_entry *entry,
3117 void *context, int vl, int mode, u64 data)
3118{
3119 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120
3121 return dd->send_egress_err_status_cnt[59];
3122}
3123
3124static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3125 void *context, int vl, int mode,
3126 u64 data)
3127{
3128 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3129
3130 return dd->send_egress_err_status_cnt[58];
3131}
3132
3133static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3134 void *context, int vl, int mode,
3135 u64 data)
3136{
3137 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3138
3139 return dd->send_egress_err_status_cnt[57];
3140}
3141
3142static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3143 void *context, int vl, int mode,
3144 u64 data)
3145{
3146 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3147
3148 return dd->send_egress_err_status_cnt[56];
3149}
3150
3151static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3152 void *context, int vl, int mode,
3153 u64 data)
3154{
3155 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3156
3157 return dd->send_egress_err_status_cnt[55];
3158}
3159
3160static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3161 void *context, int vl, int mode,
3162 u64 data)
3163{
3164 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3165
3166 return dd->send_egress_err_status_cnt[54];
3167}
3168
3169static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3170 void *context, int vl, int mode,
3171 u64 data)
3172{
3173 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3174
3175 return dd->send_egress_err_status_cnt[53];
3176}
3177
3178static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3179 void *context, int vl, int mode,
3180 u64 data)
3181{
3182 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3183
3184 return dd->send_egress_err_status_cnt[52];
3185}
3186
3187static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3188 void *context, int vl, int mode,
3189 u64 data)
3190{
3191 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3192
3193 return dd->send_egress_err_status_cnt[51];
3194}
3195
3196static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3197 void *context, int vl, int mode,
3198 u64 data)
3199{
3200 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3201
3202 return dd->send_egress_err_status_cnt[50];
3203}
3204
3205static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3206 void *context, int vl, int mode,
3207 u64 data)
3208{
3209 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3210
3211 return dd->send_egress_err_status_cnt[49];
3212}
3213
3214static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3215 void *context, int vl, int mode,
3216 u64 data)
3217{
3218 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3219
3220 return dd->send_egress_err_status_cnt[48];
3221}
3222
3223static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3224 void *context, int vl, int mode,
3225 u64 data)
3226{
3227 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3228
3229 return dd->send_egress_err_status_cnt[47];
3230}
3231
3232static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3233 void *context, int vl, int mode,
3234 u64 data)
3235{
3236 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3237
3238 return dd->send_egress_err_status_cnt[46];
3239}
3240
3241static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3242 void *context, int vl, int mode,
3243 u64 data)
3244{
3245 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3246
3247 return dd->send_egress_err_status_cnt[45];
3248}
3249
3250static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3251 void *context, int vl,
3252 int mode, u64 data)
3253{
3254 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3255
3256 return dd->send_egress_err_status_cnt[44];
3257}
3258
3259static u64 access_tx_read_sdma_memory_unc_err_cnt(
3260 const struct cntr_entry *entry,
3261 void *context, int vl, int mode, u64 data)
3262{
3263 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3264
3265 return dd->send_egress_err_status_cnt[43];
3266}
3267
3268static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3269 void *context, int vl, int mode,
3270 u64 data)
3271{
3272 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3273
3274 return dd->send_egress_err_status_cnt[42];
3275}
3276
3277static u64 access_tx_credit_return_parity_err_cnt(
3278 const struct cntr_entry *entry,
3279 void *context, int vl, int mode, u64 data)
3280{
3281 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3282
3283 return dd->send_egress_err_status_cnt[41];
3284}
3285
3286static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3287 const struct cntr_entry *entry,
3288 void *context, int vl, int mode, u64 data)
3289{
3290 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3291
3292 return dd->send_egress_err_status_cnt[40];
3293}
3294
3295static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3296 const struct cntr_entry *entry,
3297 void *context, int vl, int mode, u64 data)
3298{
3299 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3300
3301 return dd->send_egress_err_status_cnt[39];
3302}
3303
3304static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3305 const struct cntr_entry *entry,
3306 void *context, int vl, int mode, u64 data)
3307{
3308 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3309
3310 return dd->send_egress_err_status_cnt[38];
3311}
3312
3313static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3314 const struct cntr_entry *entry,
3315 void *context, int vl, int mode, u64 data)
3316{
3317 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3318
3319 return dd->send_egress_err_status_cnt[37];
3320}
3321
3322static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3323 const struct cntr_entry *entry,
3324 void *context, int vl, int mode, u64 data)
3325{
3326 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3327
3328 return dd->send_egress_err_status_cnt[36];
3329}
3330
3331static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3332 const struct cntr_entry *entry,
3333 void *context, int vl, int mode, u64 data)
3334{
3335 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3336
3337 return dd->send_egress_err_status_cnt[35];
3338}
3339
3340static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3341 const struct cntr_entry *entry,
3342 void *context, int vl, int mode, u64 data)
3343{
3344 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3345
3346 return dd->send_egress_err_status_cnt[34];
3347}
3348
3349static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3350 const struct cntr_entry *entry,
3351 void *context, int vl, int mode, u64 data)
3352{
3353 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3354
3355 return dd->send_egress_err_status_cnt[33];
3356}
3357
3358static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3359 const struct cntr_entry *entry,
3360 void *context, int vl, int mode, u64 data)
3361{
3362 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3363
3364 return dd->send_egress_err_status_cnt[32];
3365}
3366
3367static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3368 const struct cntr_entry *entry,
3369 void *context, int vl, int mode, u64 data)
3370{
3371 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3372
3373 return dd->send_egress_err_status_cnt[31];
3374}
3375
3376static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3377 const struct cntr_entry *entry,
3378 void *context, int vl, int mode, u64 data)
3379{
3380 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3381
3382 return dd->send_egress_err_status_cnt[30];
3383}
3384
3385static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3386 const struct cntr_entry *entry,
3387 void *context, int vl, int mode, u64 data)
3388{
3389 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3390
3391 return dd->send_egress_err_status_cnt[29];
3392}
3393
3394static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3395 const struct cntr_entry *entry,
3396 void *context, int vl, int mode, u64 data)
3397{
3398 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3399
3400 return dd->send_egress_err_status_cnt[28];
3401}
3402
3403static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3404 const struct cntr_entry *entry,
3405 void *context, int vl, int mode, u64 data)
3406{
3407 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3408
3409 return dd->send_egress_err_status_cnt[27];
3410}
3411
3412static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3413 const struct cntr_entry *entry,
3414 void *context, int vl, int mode, u64 data)
3415{
3416 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3417
3418 return dd->send_egress_err_status_cnt[26];
3419}
3420
3421static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3422 const struct cntr_entry *entry,
3423 void *context, int vl, int mode, u64 data)
3424{
3425 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3426
3427 return dd->send_egress_err_status_cnt[25];
3428}
3429
3430static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3431 const struct cntr_entry *entry,
3432 void *context, int vl, int mode, u64 data)
3433{
3434 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3435
3436 return dd->send_egress_err_status_cnt[24];
3437}
3438
3439static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3440 const struct cntr_entry *entry,
3441 void *context, int vl, int mode, u64 data)
3442{
3443 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3444
3445 return dd->send_egress_err_status_cnt[23];
3446}
3447
3448static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3449 const struct cntr_entry *entry,
3450 void *context, int vl, int mode, u64 data)
3451{
3452 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3453
3454 return dd->send_egress_err_status_cnt[22];
3455}
3456
3457static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3458 const struct cntr_entry *entry,
3459 void *context, int vl, int mode, u64 data)
3460{
3461 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3462
3463 return dd->send_egress_err_status_cnt[21];
3464}
3465
3466static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3467 const struct cntr_entry *entry,
3468 void *context, int vl, int mode, u64 data)
3469{
3470 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3471
3472 return dd->send_egress_err_status_cnt[20];
3473}
3474
3475static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3476 const struct cntr_entry *entry,
3477 void *context, int vl, int mode, u64 data)
3478{
3479 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3480
3481 return dd->send_egress_err_status_cnt[19];
3482}
3483
3484static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3485 const struct cntr_entry *entry,
3486 void *context, int vl, int mode, u64 data)
3487{
3488 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3489
3490 return dd->send_egress_err_status_cnt[18];
3491}
3492
3493static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3494 const struct cntr_entry *entry,
3495 void *context, int vl, int mode, u64 data)
3496{
3497 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3498
3499 return dd->send_egress_err_status_cnt[17];
3500}
3501
3502static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3503 const struct cntr_entry *entry,
3504 void *context, int vl, int mode, u64 data)
3505{
3506 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3507
3508 return dd->send_egress_err_status_cnt[16];
3509}
3510
3511static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3512 void *context, int vl, int mode,
3513 u64 data)
3514{
3515 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3516
3517 return dd->send_egress_err_status_cnt[15];
3518}
3519
3520static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3521 void *context, int vl,
3522 int mode, u64 data)
3523{
3524 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3525
3526 return dd->send_egress_err_status_cnt[14];
3527}
3528
3529static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3530 void *context, int vl, int mode,
3531 u64 data)
3532{
3533 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3534
3535 return dd->send_egress_err_status_cnt[13];
3536}
3537
3538static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3539 void *context, int vl, int mode,
3540 u64 data)
3541{
3542 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3543
3544 return dd->send_egress_err_status_cnt[12];
3545}
3546
3547static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3548 const struct cntr_entry *entry,
3549 void *context, int vl, int mode, u64 data)
3550{
3551 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3552
3553 return dd->send_egress_err_status_cnt[11];
3554}
3555
3556static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3557 void *context, int vl, int mode,
3558 u64 data)
3559{
3560 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3561
3562 return dd->send_egress_err_status_cnt[10];
3563}
3564
3565static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3566 void *context, int vl, int mode,
3567 u64 data)
3568{
3569 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3570
3571 return dd->send_egress_err_status_cnt[9];
3572}
3573
3574static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3575 const struct cntr_entry *entry,
3576 void *context, int vl, int mode, u64 data)
3577{
3578 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3579
3580 return dd->send_egress_err_status_cnt[8];
3581}
3582
3583static u64 access_tx_pio_launch_intf_parity_err_cnt(
3584 const struct cntr_entry *entry,
3585 void *context, int vl, int mode, u64 data)
3586{
3587 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3588
3589 return dd->send_egress_err_status_cnt[7];
3590}
3591
3592static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3593 void *context, int vl, int mode,
3594 u64 data)
3595{
3596 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3597
3598 return dd->send_egress_err_status_cnt[6];
3599}
3600
3601static u64 access_tx_incorrect_link_state_err_cnt(
3602 const struct cntr_entry *entry,
3603 void *context, int vl, int mode, u64 data)
3604{
3605 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3606
3607 return dd->send_egress_err_status_cnt[5];
3608}
3609
3610static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3611 void *context, int vl, int mode,
3612 u64 data)
3613{
3614 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3615
3616 return dd->send_egress_err_status_cnt[4];
3617}
3618
3619static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3620 const struct cntr_entry *entry,
3621 void *context, int vl, int mode, u64 data)
3622{
3623 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3624
3625 return dd->send_egress_err_status_cnt[3];
3626}
3627
3628static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3629 void *context, int vl, int mode,
3630 u64 data)
3631{
3632 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3633
3634 return dd->send_egress_err_status_cnt[2];
3635}
3636
3637static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3638 const struct cntr_entry *entry,
3639 void *context, int vl, int mode, u64 data)
3640{
3641 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3642
3643 return dd->send_egress_err_status_cnt[1];
3644}
3645
3646static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3647 const struct cntr_entry *entry,
3648 void *context, int vl, int mode, u64 data)
3649{
3650 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3651
3652 return dd->send_egress_err_status_cnt[0];
3653}
3654
3655/*
3656 * Software counters corresponding to each of the
3657 * error status bits within SendErrStatus
3658 */
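/*
 * Each accessor in this group just reports one element of
 * dd->send_err_status_cnt[]; the array index appears to track the bit
 * position of the corresponding error within the SendErrStatus CSR.
 * The counts themselves are presumably accumulated by the send error
 * interrupt handling elsewhere in this file.
 */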
3659static u64 access_send_csr_write_bad_addr_err_cnt(
3660 const struct cntr_entry *entry,
3661 void *context, int vl, int mode, u64 data)
3662{
3663 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3664
3665 return dd->send_err_status_cnt[2];
3666}
3667
3668static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3669 void *context, int vl,
3670 int mode, u64 data)
3671{
3672 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3673
3674 return dd->send_err_status_cnt[1];
3675}
3676
3677static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3678 void *context, int vl, int mode,
3679 u64 data)
3680{
3681 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3682
3683 return dd->send_err_status_cnt[0];
3684}
3685
3686/*
3687 * Software counters corresponding to each of the
3688 * error status bits within SendCtxtErrStatus
3689 */
3690static u64 access_pio_write_out_of_bounds_err_cnt(
3691 const struct cntr_entry *entry,
3692 void *context, int vl, int mode, u64 data)
3693{
3694 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3695
3696 return dd->sw_ctxt_err_status_cnt[4];
3697}
3698
3699static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3700 void *context, int vl, int mode,
3701 u64 data)
3702{
3703 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3704
3705 return dd->sw_ctxt_err_status_cnt[3];
3706}
3707
3708static u64 access_pio_write_crosses_boundary_err_cnt(
3709 const struct cntr_entry *entry,
3710 void *context, int vl, int mode, u64 data)
3711{
3712 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3713
3714 return dd->sw_ctxt_err_status_cnt[2];
3715}
3716
3717static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3718 void *context, int vl,
3719 int mode, u64 data)
3720{
3721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722
3723 return dd->sw_ctxt_err_status_cnt[1];
3724}
3725
3726static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3727 void *context, int vl, int mode,
3728 u64 data)
3729{
3730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731
3732 return dd->sw_ctxt_err_status_cnt[0];
3733}
3734
3735/*
3736 * Software counters corresponding to each of the
3737 * error status bits within SendDmaEngErrStatus
3738 */
3739static u64 access_sdma_header_request_fifo_cor_err_cnt(
3740 const struct cntr_entry *entry,
3741 void *context, int vl, int mode, u64 data)
3742{
3743 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3744
3745 return dd->sw_send_dma_eng_err_status_cnt[23];
3746}
3747
3748static u64 access_sdma_header_storage_cor_err_cnt(
3749 const struct cntr_entry *entry,
3750 void *context, int vl, int mode, u64 data)
3751{
3752 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3753
3754 return dd->sw_send_dma_eng_err_status_cnt[22];
3755}
3756
3757static u64 access_sdma_packet_tracking_cor_err_cnt(
3758 const struct cntr_entry *entry,
3759 void *context, int vl, int mode, u64 data)
3760{
3761 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3762
3763 return dd->sw_send_dma_eng_err_status_cnt[21];
3764}
3765
3766static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3767 void *context, int vl, int mode,
3768 u64 data)
3769{
3770 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3771
3772 return dd->sw_send_dma_eng_err_status_cnt[20];
3773}
3774
3775static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3776 void *context, int vl, int mode,
3777 u64 data)
3778{
3779 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3780
3781 return dd->sw_send_dma_eng_err_status_cnt[19];
3782}
3783
3784static u64 access_sdma_header_request_fifo_unc_err_cnt(
3785 const struct cntr_entry *entry,
3786 void *context, int vl, int mode, u64 data)
3787{
3788 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3789
3790 return dd->sw_send_dma_eng_err_status_cnt[18];
3791}
3792
3793static u64 access_sdma_header_storage_unc_err_cnt(
3794 const struct cntr_entry *entry,
3795 void *context, int vl, int mode, u64 data)
3796{
3797 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3798
3799 return dd->sw_send_dma_eng_err_status_cnt[17];
3800}
3801
3802static u64 access_sdma_packet_tracking_unc_err_cnt(
3803 const struct cntr_entry *entry,
3804 void *context, int vl, int mode, u64 data)
3805{
3806 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3807
3808 return dd->sw_send_dma_eng_err_status_cnt[16];
3809}
3810
3811static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3812 void *context, int vl, int mode,
3813 u64 data)
3814{
3815 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3816
3817 return dd->sw_send_dma_eng_err_status_cnt[15];
3818}
3819
3820static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3821 void *context, int vl, int mode,
3822 u64 data)
3823{
3824 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3825
3826 return dd->sw_send_dma_eng_err_status_cnt[14];
3827}
3828
3829static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3830 void *context, int vl, int mode,
3831 u64 data)
3832{
3833 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3834
3835 return dd->sw_send_dma_eng_err_status_cnt[13];
3836}
3837
3838static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3839 void *context, int vl, int mode,
3840 u64 data)
3841{
3842 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3843
3844 return dd->sw_send_dma_eng_err_status_cnt[12];
3845}
3846
3847static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3848 void *context, int vl, int mode,
3849 u64 data)
3850{
3851 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3852
3853 return dd->sw_send_dma_eng_err_status_cnt[11];
3854}
3855
3856static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3857 void *context, int vl, int mode,
3858 u64 data)
3859{
3860 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3861
3862 return dd->sw_send_dma_eng_err_status_cnt[10];
3863}
3864
3865static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3866 void *context, int vl, int mode,
3867 u64 data)
3868{
3869 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3870
3871 return dd->sw_send_dma_eng_err_status_cnt[9];
3872}
3873
3874static u64 access_sdma_packet_desc_overflow_err_cnt(
3875 const struct cntr_entry *entry,
3876 void *context, int vl, int mode, u64 data)
3877{
3878 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3879
3880 return dd->sw_send_dma_eng_err_status_cnt[8];
3881}
3882
3883static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3884 void *context, int vl,
3885 int mode, u64 data)
3886{
3887 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3888
3889 return dd->sw_send_dma_eng_err_status_cnt[7];
3890}
3891
3892static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3893 void *context, int vl, int mode, u64 data)
3894{
3895 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3896
3897 return dd->sw_send_dma_eng_err_status_cnt[6];
3898}
3899
3900static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3901 void *context, int vl, int mode,
3902 u64 data)
3903{
3904 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3905
3906 return dd->sw_send_dma_eng_err_status_cnt[5];
3907}
3908
3909static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3910 void *context, int vl, int mode,
3911 u64 data)
3912{
3913 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3914
3915 return dd->sw_send_dma_eng_err_status_cnt[4];
3916}
3917
3918static u64 access_sdma_tail_out_of_bounds_err_cnt(
3919 const struct cntr_entry *entry,
3920 void *context, int vl, int mode, u64 data)
3921{
3922 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3923
3924 return dd->sw_send_dma_eng_err_status_cnt[3];
3925}
3926
3927static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3928 void *context, int vl, int mode,
3929 u64 data)
3930{
3931 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932
3933 return dd->sw_send_dma_eng_err_status_cnt[2];
3934}
3935
3936static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3937 void *context, int vl, int mode,
3938 u64 data)
3939{
3940 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941
3942 return dd->sw_send_dma_eng_err_status_cnt[1];
3943}
3944
3945static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3946 void *context, int vl, int mode,
3947 u64 data)
3948{
3949 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3950
3951 return dd->sw_send_dma_eng_err_status_cnt[0];
3952}
3953
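/*
 * def_access_sw_cpu() below stamps out one accessor per per-CPU
 * software counter kept in ppd->ibport_data.rvp.  A minimal sketch of
 * the expansion, assuming cntr == rc_acks (read_write_cpu() is
 * expected to return the summed per-CPU value in read mode and to
 * reset the z_ baseline in write mode):
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */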
3954#define def_access_sw_cpu(cntr) \
3955static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3956 void *context, int vl, int mode, u64 data) \
3957{ \
3958 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3959	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,      \
3960 ppd->ibport_data.rvp.cntr, vl, \
3961			      mode, data); \
3962}
3963
3964def_access_sw_cpu(rc_acks);
3965def_access_sw_cpu(rc_qacks);
3966def_access_sw_cpu(rc_delayed_comp);
3967
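/*
 * The ibp counters generated below are port-wide software counters
 * (ppd->ibport_data.rvp.n_<cntr>); they keep no per-VL breakdown, so
 * any per-VL query simply returns 0.
 */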
3968#define def_access_ibp_counter(cntr) \
3969static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3970 void *context, int vl, int mode, u64 data) \
3971{ \
3972 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3973 \
3974 if (vl != CNTR_INVALID_VL) \
3975 return 0; \
3976 \
3977	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
3978			     mode, data); \
3979}
3980
3981def_access_ibp_counter(loop_pkts);
3982def_access_ibp_counter(rc_resends);
3983def_access_ibp_counter(rnr_naks);
3984def_access_ibp_counter(other_naks);
3985def_access_ibp_counter(rc_timeouts);
3986def_access_ibp_counter(pkt_drops);
3987def_access_ibp_counter(dmawait);
3988def_access_ibp_counter(rc_seqnak);
3989def_access_ibp_counter(rc_dupreq);
3990def_access_ibp_counter(rdma_seq);
3991def_access_ibp_counter(unaligned);
3992def_access_ibp_counter(seq_naks);
3993
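/*
 * Device-wide counter table.  A rough reading of the CNTR_ELEM()
 * arguments, inferred from the uses below: name, CSR address, offset,
 * flags, then the read/write accessor.  Hardware-backed entries (the
 * RXE32/CCE/DC_PERF variants) point at real CSRs, while the pure
 * software counters pass 0 for the CSR and rely entirely on their
 * access_* callback.
 */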
3994static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3995[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3996[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3997 CNTR_NORMAL),
3998[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3999 CNTR_NORMAL),
4000[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4001 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4002 CNTR_NORMAL),
4003[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4004 CNTR_NORMAL),
4005[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4006 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4007[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4008 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4009[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4010 CNTR_NORMAL),
4011[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4012 CNTR_NORMAL),
4013[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4014 CNTR_NORMAL),
4015[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4016 CNTR_NORMAL),
4017[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4018 CNTR_NORMAL),
4019[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4020 CNTR_NORMAL),
4021[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4022 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4023[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4024 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4025[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4026 CNTR_SYNTH),
4027[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4028[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4029 CNTR_SYNTH),
4030[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4031 CNTR_SYNTH),
4032[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4033 CNTR_SYNTH),
4034[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4035 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4036[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4037 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4038 CNTR_SYNTH),
4039[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4040 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4041[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4042 CNTR_SYNTH),
4043[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4044 CNTR_SYNTH),
4045[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4046 CNTR_SYNTH),
4047[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4048 CNTR_SYNTH),
4049[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4050 CNTR_SYNTH),
4051[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4052 CNTR_SYNTH),
4053[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4054 CNTR_SYNTH),
4055[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4056 CNTR_SYNTH | CNTR_VL),
4057[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4058 CNTR_SYNTH | CNTR_VL),
4059[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4060[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4061 CNTR_SYNTH | CNTR_VL),
4062[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4063[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4064 CNTR_SYNTH | CNTR_VL),
4065[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4066 CNTR_SYNTH),
4067[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4068 CNTR_SYNTH | CNTR_VL),
4069[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4070 CNTR_SYNTH),
4071[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4072 CNTR_SYNTH | CNTR_VL),
4073[C_DC_TOTAL_CRC] =
4074 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4075 CNTR_SYNTH),
4076[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4077 CNTR_SYNTH),
4078[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4079 CNTR_SYNTH),
4080[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4081 CNTR_SYNTH),
4082[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4083 CNTR_SYNTH),
4084[C_DC_CRC_MULT_LN] =
4085 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4086 CNTR_SYNTH),
4087[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4088 CNTR_SYNTH),
4089[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4090 CNTR_SYNTH),
4091[C_DC_SEQ_CRC_CNT] =
4092 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4093 CNTR_SYNTH),
4094[C_DC_ESC0_ONLY_CNT] =
4095 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4096 CNTR_SYNTH),
4097[C_DC_ESC0_PLUS1_CNT] =
4098 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4099 CNTR_SYNTH),
4100[C_DC_ESC0_PLUS2_CNT] =
4101 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4102 CNTR_SYNTH),
4103[C_DC_REINIT_FROM_PEER_CNT] =
4104 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4105 CNTR_SYNTH),
4106[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4107 CNTR_SYNTH),
4108[C_DC_MISC_FLG_CNT] =
4109 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4110 CNTR_SYNTH),
4111[C_DC_PRF_GOOD_LTP_CNT] =
4112 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4113[C_DC_PRF_ACCEPTED_LTP_CNT] =
4114 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4115 CNTR_SYNTH),
4116[C_DC_PRF_RX_FLIT_CNT] =
4117 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4118[C_DC_PRF_TX_FLIT_CNT] =
4119 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4120[C_DC_PRF_CLK_CNTR] =
4121 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4122[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4123 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4124[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4125 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4126 CNTR_SYNTH),
4127[C_DC_PG_STS_TX_SBE_CNT] =
4128 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4129[C_DC_PG_STS_TX_MBE_CNT] =
4130 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4131 CNTR_SYNTH),
4132[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4133 access_sw_cpu_intr),
4134[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4135 access_sw_cpu_rcv_limit),
4136[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4137 access_sw_vtx_wait),
4138[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4139 access_sw_pio_wait),
4140[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4141 access_sw_pio_drain),
4142[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4143 access_sw_kmem_wait),
4144[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4145 access_sw_send_schedule),
4146[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4147 SEND_DMA_DESC_FETCHED_CNT, 0,
4148 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4149 dev_access_u32_csr),
4150[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4151 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4152 access_sde_int_cnt),
4153[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4154 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4155 access_sde_err_cnt),
4156[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4157 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4158 access_sde_idle_int_cnt),
4159[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4160 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4161 access_sde_progress_int_cnt),
4162/* MISC_ERR_STATUS */
4163[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4164 CNTR_NORMAL,
4165 access_misc_pll_lock_fail_err_cnt),
4166[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4167 CNTR_NORMAL,
4168 access_misc_mbist_fail_err_cnt),
4169[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4170 CNTR_NORMAL,
4171 access_misc_invalid_eep_cmd_err_cnt),
4172[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4173 CNTR_NORMAL,
4174 access_misc_efuse_done_parity_err_cnt),
4175[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4176 CNTR_NORMAL,
4177 access_misc_efuse_write_err_cnt),
4178[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4179 0, CNTR_NORMAL,
4180 access_misc_efuse_read_bad_addr_err_cnt),
4181[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4182 CNTR_NORMAL,
4183 access_misc_efuse_csr_parity_err_cnt),
4184[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4185 CNTR_NORMAL,
4186 access_misc_fw_auth_failed_err_cnt),
4187[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4188 CNTR_NORMAL,
4189 access_misc_key_mismatch_err_cnt),
4190[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4191 CNTR_NORMAL,
4192 access_misc_sbus_write_failed_err_cnt),
4193[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4194 CNTR_NORMAL,
4195 access_misc_csr_write_bad_addr_err_cnt),
4196[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4197 CNTR_NORMAL,
4198 access_misc_csr_read_bad_addr_err_cnt),
4199[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4200 CNTR_NORMAL,
4201 access_misc_csr_parity_err_cnt),
4202/* CceErrStatus */
4203[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4204 CNTR_NORMAL,
4205 access_sw_cce_err_status_aggregated_cnt),
4206[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4207 CNTR_NORMAL,
4208 access_cce_msix_csr_parity_err_cnt),
4209[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4210 CNTR_NORMAL,
4211 access_cce_int_map_unc_err_cnt),
4212[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4213 CNTR_NORMAL,
4214 access_cce_int_map_cor_err_cnt),
4215[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4216 CNTR_NORMAL,
4217 access_cce_msix_table_unc_err_cnt),
4218[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4219 CNTR_NORMAL,
4220 access_cce_msix_table_cor_err_cnt),
4221[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4222 0, CNTR_NORMAL,
4223 access_cce_rxdma_conv_fifo_parity_err_cnt),
4224[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4225 0, CNTR_NORMAL,
4226 access_cce_rcpl_async_fifo_parity_err_cnt),
4227[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4228 CNTR_NORMAL,
4229 access_cce_seg_write_bad_addr_err_cnt),
4230[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4231 CNTR_NORMAL,
4232 access_cce_seg_read_bad_addr_err_cnt),
4233[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4234 CNTR_NORMAL,
4235 access_la_triggered_cnt),
4236[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4237 CNTR_NORMAL,
4238 access_cce_trgt_cpl_timeout_err_cnt),
4239[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4240 CNTR_NORMAL,
4241 access_pcic_receive_parity_err_cnt),
4242[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4243 CNTR_NORMAL,
4244 access_pcic_transmit_back_parity_err_cnt),
4245[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4246 0, CNTR_NORMAL,
4247 access_pcic_transmit_front_parity_err_cnt),
4248[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4249 CNTR_NORMAL,
4250 access_pcic_cpl_dat_q_unc_err_cnt),
4251[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4252 CNTR_NORMAL,
4253 access_pcic_cpl_hd_q_unc_err_cnt),
4254[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4255 CNTR_NORMAL,
4256 access_pcic_post_dat_q_unc_err_cnt),
4257[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4258 CNTR_NORMAL,
4259 access_pcic_post_hd_q_unc_err_cnt),
4260[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4261 CNTR_NORMAL,
4262 access_pcic_retry_sot_mem_unc_err_cnt),
4263[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4264 CNTR_NORMAL,
4265 access_pcic_retry_mem_unc_err),
4266[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4267 CNTR_NORMAL,
4268 access_pcic_n_post_dat_q_parity_err_cnt),
4269[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4270 CNTR_NORMAL,
4271 access_pcic_n_post_h_q_parity_err_cnt),
4272[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4273 CNTR_NORMAL,
4274 access_pcic_cpl_dat_q_cor_err_cnt),
4275[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4276 CNTR_NORMAL,
4277 access_pcic_cpl_hd_q_cor_err_cnt),
4278[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4279 CNTR_NORMAL,
4280 access_pcic_post_dat_q_cor_err_cnt),
4281[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4282 CNTR_NORMAL,
4283 access_pcic_post_hd_q_cor_err_cnt),
4284[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4285 CNTR_NORMAL,
4286 access_pcic_retry_sot_mem_cor_err_cnt),
4287[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4288 CNTR_NORMAL,
4289 access_pcic_retry_mem_cor_err_cnt),
4290[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4291 "CceCli1AsyncFifoDbgParityError", 0, 0,
4292 CNTR_NORMAL,
4293 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4294[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4295 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4296 CNTR_NORMAL,
4297 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4298 ),
4299[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4300 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4301 CNTR_NORMAL,
4302 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4303[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4304 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4305 CNTR_NORMAL,
4306 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4307[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4308 0, CNTR_NORMAL,
4309 access_cce_cli2_async_fifo_parity_err_cnt),
4310[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4311 CNTR_NORMAL,
4312 access_cce_csr_cfg_bus_parity_err_cnt),
4313[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4314 0, CNTR_NORMAL,
4315 access_cce_cli0_async_fifo_parity_err_cnt),
4316[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4317 CNTR_NORMAL,
4318 access_cce_rspd_data_parity_err_cnt),
4319[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4320 CNTR_NORMAL,
4321 access_cce_trgt_access_err_cnt),
4322[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4323 0, CNTR_NORMAL,
4324 access_cce_trgt_async_fifo_parity_err_cnt),
4325[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4326 CNTR_NORMAL,
4327 access_cce_csr_write_bad_addr_err_cnt),
4328[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4329 CNTR_NORMAL,
4330 access_cce_csr_read_bad_addr_err_cnt),
4331[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4332 CNTR_NORMAL,
4333 access_ccs_csr_parity_err_cnt),
4334
4335/* RcvErrStatus */
4336[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4337 CNTR_NORMAL,
4338 access_rx_csr_parity_err_cnt),
4339[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4340 CNTR_NORMAL,
4341 access_rx_csr_write_bad_addr_err_cnt),
4342[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4343 CNTR_NORMAL,
4344 access_rx_csr_read_bad_addr_err_cnt),
4345[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4346 CNTR_NORMAL,
4347 access_rx_dma_csr_unc_err_cnt),
4348[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4349 CNTR_NORMAL,
4350 access_rx_dma_dq_fsm_encoding_err_cnt),
4351[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4352 CNTR_NORMAL,
4353 access_rx_dma_eq_fsm_encoding_err_cnt),
4354[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4355 CNTR_NORMAL,
4356 access_rx_dma_csr_parity_err_cnt),
4357[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4358 CNTR_NORMAL,
4359 access_rx_rbuf_data_cor_err_cnt),
4360[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4361 CNTR_NORMAL,
4362 access_rx_rbuf_data_unc_err_cnt),
4363[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4364 CNTR_NORMAL,
4365 access_rx_dma_data_fifo_rd_cor_err_cnt),
4366[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4367 CNTR_NORMAL,
4368 access_rx_dma_data_fifo_rd_unc_err_cnt),
4369[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4370 CNTR_NORMAL,
4371 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4372[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4373 CNTR_NORMAL,
4374 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4375[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_rx_rbuf_desc_part2_cor_err_cnt),
4378[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4379 CNTR_NORMAL,
4380 access_rx_rbuf_desc_part2_unc_err_cnt),
4381[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4382 CNTR_NORMAL,
4383 access_rx_rbuf_desc_part1_cor_err_cnt),
4384[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4385 CNTR_NORMAL,
4386 access_rx_rbuf_desc_part1_unc_err_cnt),
4387[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4388 CNTR_NORMAL,
4389 access_rx_hq_intr_fsm_err_cnt),
4390[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4391 CNTR_NORMAL,
4392 access_rx_hq_intr_csr_parity_err_cnt),
4393[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4394 CNTR_NORMAL,
4395 access_rx_lookup_csr_parity_err_cnt),
4396[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4397 CNTR_NORMAL,
4398 access_rx_lookup_rcv_array_cor_err_cnt),
4399[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4400 CNTR_NORMAL,
4401 access_rx_lookup_rcv_array_unc_err_cnt),
4402[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4403 0, CNTR_NORMAL,
4404 access_rx_lookup_des_part2_parity_err_cnt),
4405[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4406 0, CNTR_NORMAL,
4407 access_rx_lookup_des_part1_unc_cor_err_cnt),
4408[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4409 CNTR_NORMAL,
4410 access_rx_lookup_des_part1_unc_err_cnt),
4411[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4412 CNTR_NORMAL,
4413 access_rx_rbuf_next_free_buf_cor_err_cnt),
4414[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4415 CNTR_NORMAL,
4416 access_rx_rbuf_next_free_buf_unc_err_cnt),
4417[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4418 "RxRbufFlInitWrAddrParityErr", 0, 0,
4419 CNTR_NORMAL,
4420 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4421[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4422 0, CNTR_NORMAL,
4423 access_rx_rbuf_fl_initdone_parity_err_cnt),
4424[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4425 0, CNTR_NORMAL,
4426 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4427[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4428 CNTR_NORMAL,
4429 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4430[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4431 CNTR_NORMAL,
4432 access_rx_rbuf_empty_err_cnt),
4433[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4434 CNTR_NORMAL,
4435 access_rx_rbuf_full_err_cnt),
4436[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4437 CNTR_NORMAL,
4438 access_rbuf_bad_lookup_err_cnt),
4439[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4440 CNTR_NORMAL,
4441 access_rbuf_ctx_id_parity_err_cnt),
4442[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4443 CNTR_NORMAL,
4444 access_rbuf_csr_qeopdw_parity_err_cnt),
4445[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4446 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4447 CNTR_NORMAL,
4448 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4449[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4450 "RxRbufCsrQTlPtrParityErr", 0, 0,
4451 CNTR_NORMAL,
4452 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4453[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4454 0, CNTR_NORMAL,
4455 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4456[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4457 0, CNTR_NORMAL,
4458 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4459[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4460 0, 0, CNTR_NORMAL,
4461 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4462[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4463 0, CNTR_NORMAL,
4464 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4465[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4466 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4467 CNTR_NORMAL,
4468 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4469[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4470 0, CNTR_NORMAL,
4471 access_rx_rbuf_block_list_read_cor_err_cnt),
4472[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4473 0, CNTR_NORMAL,
4474 access_rx_rbuf_block_list_read_unc_err_cnt),
4475[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4476 CNTR_NORMAL,
4477 access_rx_rbuf_lookup_des_cor_err_cnt),
4478[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4479 CNTR_NORMAL,
4480 access_rx_rbuf_lookup_des_unc_err_cnt),
4481[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4482 "RxRbufLookupDesRegUncCorErr", 0, 0,
4483 CNTR_NORMAL,
4484 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4485[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4486 CNTR_NORMAL,
4487 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4488[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4489 CNTR_NORMAL,
4490 access_rx_rbuf_free_list_cor_err_cnt),
4491[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4492 CNTR_NORMAL,
4493 access_rx_rbuf_free_list_unc_err_cnt),
4494[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4495 CNTR_NORMAL,
4496 access_rx_rcv_fsm_encoding_err_cnt),
4497[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4498 CNTR_NORMAL,
4499 access_rx_dma_flag_cor_err_cnt),
4500[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4501 CNTR_NORMAL,
4502 access_rx_dma_flag_unc_err_cnt),
4503[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4504 CNTR_NORMAL,
4505 access_rx_dc_sop_eop_parity_err_cnt),
4506[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4507 CNTR_NORMAL,
4508 access_rx_rcv_csr_parity_err_cnt),
4509[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4510 CNTR_NORMAL,
4511 access_rx_rcv_qp_map_table_cor_err_cnt),
4512[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4513 CNTR_NORMAL,
4514 access_rx_rcv_qp_map_table_unc_err_cnt),
4515[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4516 CNTR_NORMAL,
4517 access_rx_rcv_data_cor_err_cnt),
4518[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4519 CNTR_NORMAL,
4520 access_rx_rcv_data_unc_err_cnt),
4521[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4522 CNTR_NORMAL,
4523 access_rx_rcv_hdr_cor_err_cnt),
4524[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4525 CNTR_NORMAL,
4526 access_rx_rcv_hdr_unc_err_cnt),
4527[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4528 CNTR_NORMAL,
4529 access_rx_dc_intf_parity_err_cnt),
4530[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4531 CNTR_NORMAL,
4532 access_rx_dma_csr_cor_err_cnt),
4533/* SendPioErrStatus */
4534[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4535 CNTR_NORMAL,
4536 access_pio_pec_sop_head_parity_err_cnt),
4537[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4538 CNTR_NORMAL,
4539 access_pio_pcc_sop_head_parity_err_cnt),
4540[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4541 0, 0, CNTR_NORMAL,
4542 access_pio_last_returned_cnt_parity_err_cnt),
4543[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4544 0, CNTR_NORMAL,
4545 access_pio_current_free_cnt_parity_err_cnt),
4546[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4547 CNTR_NORMAL,
4548 access_pio_reserved_31_err_cnt),
4549[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4550 CNTR_NORMAL,
4551 access_pio_reserved_30_err_cnt),
4552[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4553 CNTR_NORMAL,
4554 access_pio_ppmc_sop_len_err_cnt),
4555[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4556 CNTR_NORMAL,
4557 access_pio_ppmc_bqc_mem_parity_err_cnt),
4558[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4559 CNTR_NORMAL,
4560 access_pio_vl_fifo_parity_err_cnt),
4561[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4562 CNTR_NORMAL,
4563 access_pio_vlf_sop_parity_err_cnt),
4564[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4565 CNTR_NORMAL,
4566 access_pio_vlf_v1_len_parity_err_cnt),
4567[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4568 CNTR_NORMAL,
4569 access_pio_block_qw_count_parity_err_cnt),
4570[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4571 CNTR_NORMAL,
4572 access_pio_write_qw_valid_parity_err_cnt),
4573[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4574 CNTR_NORMAL,
4575 access_pio_state_machine_err_cnt),
4576[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4577 CNTR_NORMAL,
4578 access_pio_write_data_parity_err_cnt),
4579[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4580 CNTR_NORMAL,
4581 access_pio_host_addr_mem_cor_err_cnt),
4582[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4583 CNTR_NORMAL,
4584 access_pio_host_addr_mem_unc_err_cnt),
4585[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4586 CNTR_NORMAL,
4587 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4588[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4589 CNTR_NORMAL,
4590 access_pio_init_sm_in_err_cnt),
4591[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4592 CNTR_NORMAL,
4593 access_pio_ppmc_pbl_fifo_err_cnt),
4594[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4595 0, CNTR_NORMAL,
4596 access_pio_credit_ret_fifo_parity_err_cnt),
4597[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4598 CNTR_NORMAL,
4599 access_pio_v1_len_mem_bank1_cor_err_cnt),
4600[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4601 CNTR_NORMAL,
4602 access_pio_v1_len_mem_bank0_cor_err_cnt),
4603[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4604 CNTR_NORMAL,
4605 access_pio_v1_len_mem_bank1_unc_err_cnt),
4606[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4607 CNTR_NORMAL,
4608 access_pio_v1_len_mem_bank0_unc_err_cnt),
4609[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4610 CNTR_NORMAL,
4611 access_pio_sm_pkt_reset_parity_err_cnt),
4612[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4613 CNTR_NORMAL,
4614 access_pio_pkt_evict_fifo_parity_err_cnt),
4615[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4616 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4617 CNTR_NORMAL,
4618 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4619[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4620 CNTR_NORMAL,
4621 access_pio_sbrdctl_crrel_parity_err_cnt),
4622[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4623 CNTR_NORMAL,
4624 access_pio_pec_fifo_parity_err_cnt),
4625[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4626 CNTR_NORMAL,
4627 access_pio_pcc_fifo_parity_err_cnt),
4628[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4629 CNTR_NORMAL,
4630 access_pio_sb_mem_fifo1_err_cnt),
4631[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4632 CNTR_NORMAL,
4633 access_pio_sb_mem_fifo0_err_cnt),
4634[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4635 CNTR_NORMAL,
4636 access_pio_csr_parity_err_cnt),
4637[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4638 CNTR_NORMAL,
4639 access_pio_write_addr_parity_err_cnt),
4640[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4641 CNTR_NORMAL,
4642 access_pio_write_bad_ctxt_err_cnt),
4643/* SendDmaErrStatus */
4644[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4645 0, CNTR_NORMAL,
4646 access_sdma_pcie_req_tracking_cor_err_cnt),
4647[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4648 0, CNTR_NORMAL,
4649 access_sdma_pcie_req_tracking_unc_err_cnt),
4650[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4651 CNTR_NORMAL,
4652 access_sdma_csr_parity_err_cnt),
4653[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4654 CNTR_NORMAL,
4655 access_sdma_rpy_tag_err_cnt),
4656/* SendEgressErrStatus */
4657[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4658 CNTR_NORMAL,
4659 access_tx_read_pio_memory_csr_unc_err_cnt),
4660[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4661 0, CNTR_NORMAL,
4662 access_tx_read_sdma_memory_csr_err_cnt),
4663[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4664 CNTR_NORMAL,
4665 access_tx_egress_fifo_cor_err_cnt),
4666[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4667 CNTR_NORMAL,
4668 access_tx_read_pio_memory_cor_err_cnt),
4669[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4670 CNTR_NORMAL,
4671 access_tx_read_sdma_memory_cor_err_cnt),
4672[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4673 CNTR_NORMAL,
4674 access_tx_sb_hdr_cor_err_cnt),
4675[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4676 CNTR_NORMAL,
4677 access_tx_credit_overrun_err_cnt),
4678[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4679 CNTR_NORMAL,
4680 access_tx_launch_fifo8_cor_err_cnt),
4681[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4682 CNTR_NORMAL,
4683 access_tx_launch_fifo7_cor_err_cnt),
4684[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4685 CNTR_NORMAL,
4686 access_tx_launch_fifo6_cor_err_cnt),
4687[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4688 CNTR_NORMAL,
4689 access_tx_launch_fifo5_cor_err_cnt),
4690[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4691 CNTR_NORMAL,
4692 access_tx_launch_fifo4_cor_err_cnt),
4693[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4694 CNTR_NORMAL,
4695 access_tx_launch_fifo3_cor_err_cnt),
4696[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4697 CNTR_NORMAL,
4698 access_tx_launch_fifo2_cor_err_cnt),
4699[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4700 CNTR_NORMAL,
4701 access_tx_launch_fifo1_cor_err_cnt),
4702[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4703 CNTR_NORMAL,
4704 access_tx_launch_fifo0_cor_err_cnt),
4705[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4706 CNTR_NORMAL,
4707 access_tx_credit_return_vl_err_cnt),
4708[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4709 CNTR_NORMAL,
4710 access_tx_hcrc_insertion_err_cnt),
4711[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4712 CNTR_NORMAL,
4713 access_tx_egress_fifo_unc_err_cnt),
4714[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4715 CNTR_NORMAL,
4716 access_tx_read_pio_memory_unc_err_cnt),
4717[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4718 CNTR_NORMAL,
4719 access_tx_read_sdma_memory_unc_err_cnt),
4720[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4721 CNTR_NORMAL,
4722 access_tx_sb_hdr_unc_err_cnt),
4723[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4724 CNTR_NORMAL,
4725 access_tx_credit_return_partiy_err_cnt),
4726[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4727 0, 0, CNTR_NORMAL,
4728 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4729[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4730 0, 0, CNTR_NORMAL,
4731 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4732[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4733 0, 0, CNTR_NORMAL,
4734 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4735[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4736 0, 0, CNTR_NORMAL,
4737 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4738[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4739 0, 0, CNTR_NORMAL,
4740 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4741[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4742 0, 0, CNTR_NORMAL,
4743 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4744[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4745 0, 0, CNTR_NORMAL,
4746 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4747[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4748 0, 0, CNTR_NORMAL,
4749 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4750[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4751 0, 0, CNTR_NORMAL,
4752 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4753[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4754 0, 0, CNTR_NORMAL,
4755 access_tx_sdma15_disallowed_packet_err_cnt),
4756[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4757 0, 0, CNTR_NORMAL,
4758 access_tx_sdma14_disallowed_packet_err_cnt),
4759[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4760 0, 0, CNTR_NORMAL,
4761 access_tx_sdma13_disallowed_packet_err_cnt),
4762[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4763 0, 0, CNTR_NORMAL,
4764 access_tx_sdma12_disallowed_packet_err_cnt),
4765[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4766 0, 0, CNTR_NORMAL,
4767 access_tx_sdma11_disallowed_packet_err_cnt),
4768[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4769 0, 0, CNTR_NORMAL,
4770 access_tx_sdma10_disallowed_packet_err_cnt),
4771[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4772 0, 0, CNTR_NORMAL,
4773 access_tx_sdma9_disallowed_packet_err_cnt),
4774[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4775 0, 0, CNTR_NORMAL,
4776 access_tx_sdma8_disallowed_packet_err_cnt),
4777[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4778 0, 0, CNTR_NORMAL,
4779 access_tx_sdma7_disallowed_packet_err_cnt),
4780[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4781 0, 0, CNTR_NORMAL,
4782 access_tx_sdma6_disallowed_packet_err_cnt),
4783[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4784 0, 0, CNTR_NORMAL,
4785 access_tx_sdma5_disallowed_packet_err_cnt),
4786[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4787 0, 0, CNTR_NORMAL,
4788 access_tx_sdma4_disallowed_packet_err_cnt),
4789[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4790 0, 0, CNTR_NORMAL,
4791 access_tx_sdma3_disallowed_packet_err_cnt),
4792[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4793 0, 0, CNTR_NORMAL,
4794 access_tx_sdma2_disallowed_packet_err_cnt),
4795[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4796 0, 0, CNTR_NORMAL,
4797 access_tx_sdma1_disallowed_packet_err_cnt),
4798[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4799 0, 0, CNTR_NORMAL,
4800 access_tx_sdma0_disallowed_packet_err_cnt),
4801[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4802 CNTR_NORMAL,
4803 access_tx_config_parity_err_cnt),
4804[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4805 CNTR_NORMAL,
4806 access_tx_sbrd_ctl_csr_parity_err_cnt),
4807[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4808 CNTR_NORMAL,
4809 access_tx_launch_csr_parity_err_cnt),
4810[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4811 CNTR_NORMAL,
4812 access_tx_illegal_vl_err_cnt),
4813[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4814 "TxSbrdCtlStateMachineParityErr", 0, 0,
4815 CNTR_NORMAL,
4816 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4817[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4818 CNTR_NORMAL,
4819 access_egress_reserved_10_err_cnt),
4820[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4821 CNTR_NORMAL,
4822 access_egress_reserved_9_err_cnt),
4823[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4824 0, 0, CNTR_NORMAL,
4825 access_tx_sdma_launch_intf_parity_err_cnt),
4826[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4827 CNTR_NORMAL,
4828 access_tx_pio_launch_intf_parity_err_cnt),
4829[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4830 CNTR_NORMAL,
4831 access_egress_reserved_6_err_cnt),
4832[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4833 CNTR_NORMAL,
4834 access_tx_incorrect_link_state_err_cnt),
4835[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4836 CNTR_NORMAL,
4837 access_tx_linkdown_err_cnt),
4838[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4839 "EgressFifoUnderrunOrParityErr", 0, 0,
4840 CNTR_NORMAL,
4841 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4842[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4843 CNTR_NORMAL,
4844 access_egress_reserved_2_err_cnt),
4845[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4846 CNTR_NORMAL,
4847 access_tx_pkt_integrity_mem_unc_err_cnt),
4848[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4849 CNTR_NORMAL,
4850 access_tx_pkt_integrity_mem_cor_err_cnt),
4851/* SendErrStatus */
4852[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4853 CNTR_NORMAL,
4854 access_send_csr_write_bad_addr_err_cnt),
4855[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4856 CNTR_NORMAL,
4857 access_send_csr_read_bad_addr_err_cnt),
4858[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4859 CNTR_NORMAL,
4860 access_send_csr_parity_cnt),
4861/* SendCtxtErrStatus */
4862[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4863 CNTR_NORMAL,
4864 access_pio_write_out_of_bounds_err_cnt),
4865[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4866 CNTR_NORMAL,
4867 access_pio_write_overflow_err_cnt),
4868[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4869 0, 0, CNTR_NORMAL,
4870 access_pio_write_crosses_boundary_err_cnt),
4871[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4872 CNTR_NORMAL,
4873 access_pio_disallowed_packet_err_cnt),
4874[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4875 CNTR_NORMAL,
4876 access_pio_inconsistent_sop_err_cnt),
4877/* SendDmaEngErrStatus */
4878[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4879 0, 0, CNTR_NORMAL,
4880 access_sdma_header_request_fifo_cor_err_cnt),
4881[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4882 CNTR_NORMAL,
4883 access_sdma_header_storage_cor_err_cnt),
4884[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4885 CNTR_NORMAL,
4886 access_sdma_packet_tracking_cor_err_cnt),
4887[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4888 CNTR_NORMAL,
4889 access_sdma_assembly_cor_err_cnt),
4890[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4891 CNTR_NORMAL,
4892 access_sdma_desc_table_cor_err_cnt),
4893[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4894 0, 0, CNTR_NORMAL,
4895 access_sdma_header_request_fifo_unc_err_cnt),
4896[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4897 CNTR_NORMAL,
4898 access_sdma_header_storage_unc_err_cnt),
4899[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4900 CNTR_NORMAL,
4901 access_sdma_packet_tracking_unc_err_cnt),
4902[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4903 CNTR_NORMAL,
4904 access_sdma_assembly_unc_err_cnt),
4905[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4906 CNTR_NORMAL,
4907 access_sdma_desc_table_unc_err_cnt),
4908[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4909 CNTR_NORMAL,
4910 access_sdma_timeout_err_cnt),
4911[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4912 CNTR_NORMAL,
4913 access_sdma_header_length_err_cnt),
4914[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4915 CNTR_NORMAL,
4916 access_sdma_header_address_err_cnt),
4917[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4918 CNTR_NORMAL,
4919 access_sdma_header_select_err_cnt),
4920[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4921 CNTR_NORMAL,
4922 access_sdma_reserved_9_err_cnt),
4923[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4924 CNTR_NORMAL,
4925 access_sdma_packet_desc_overflow_err_cnt),
4926[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4927 CNTR_NORMAL,
4928 access_sdma_length_mismatch_err_cnt),
4929[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4930 CNTR_NORMAL,
4931 access_sdma_halt_err_cnt),
4932[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4933 CNTR_NORMAL,
4934 access_sdma_mem_read_err_cnt),
4935[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4936 CNTR_NORMAL,
4937 access_sdma_first_desc_err_cnt),
4938[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4939 CNTR_NORMAL,
4940 access_sdma_tail_out_of_bounds_err_cnt),
4941[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4942 CNTR_NORMAL,
4943 access_sdma_too_long_err_cnt),
4944[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4945 CNTR_NORMAL,
4946 access_sdma_gen_mismatch_err_cnt),
4947[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4948 CNTR_NORMAL,
4949 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004950};
4951
4952static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4953[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4954 CNTR_NORMAL),
4955[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4956 CNTR_NORMAL),
4957[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4958 CNTR_NORMAL),
4959[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4960 CNTR_NORMAL),
4961[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4962 CNTR_NORMAL),
4963[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4964 CNTR_NORMAL),
4965[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4966 CNTR_NORMAL),
4967[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4968[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4969[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4970[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4971 CNTR_SYNTH | CNTR_VL),
4972[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4973 CNTR_SYNTH | CNTR_VL),
4974[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4975 CNTR_SYNTH | CNTR_VL),
4976[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4977[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4978[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4979 access_sw_link_dn_cnt),
4980[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4981 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05004982[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4983 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004984[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4985 access_sw_xmit_discards),
4986[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4987 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4988 access_sw_xmit_discards),
4989[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4990 access_xmit_constraint_errs),
4991[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4992 access_rcv_constraint_errs),
4993[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4994[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4995[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4996[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4997[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4998[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4999[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5000[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5001[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5002[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5003[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5004[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5005[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5006 access_sw_cpu_rc_acks),
5007[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5008 access_sw_cpu_rc_qacks),
5009[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5010 access_sw_cpu_rc_delayed_comp),
5011[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5012[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5013[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5014[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5015[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5016[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5017[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5018[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5019[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5020[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5021[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5022[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5023[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5024[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5025[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5026[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5027[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5028[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5029[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5030[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5031[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5032[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5033[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5034[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5035[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5036[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5037[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5038[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5039[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5040[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5041[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5042[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5043[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5044[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5045[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5046[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5047[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5048[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5049[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5050[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5051[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5052[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5053[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5054[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5055[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5056[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5057[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5058[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5059[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5060[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5061[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5062[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5063[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5064[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5065[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5066[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5067[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5068[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5069[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5070[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5071[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5072[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5073[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5074[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5075[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5076[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5077[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5078[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5079[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5080[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5081[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5082[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5083[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5084[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5085[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5086[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5087[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5088[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5089[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5090[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5091};
5092
5093/* ======================================================================== */
5094
Mike Marciniszyn77241052015-07-30 15:17:43 -04005095/* return true if this is chip revision A */
5096int is_ax(struct hfi1_devdata *dd)
5097{
5098 u8 chip_rev_minor =
5099 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5100 & CCE_REVISION_CHIP_REV_MINOR_MASK;
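	/* A-step parts have the upper nibble of the minor revision clear */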
5101 return (chip_rev_minor & 0xf0) == 0;
5102}
5103
 5104/* return true if this is chip revision B */
5105int is_bx(struct hfi1_devdata *dd)
5106{
5107 u8 chip_rev_minor =
5108 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5109 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005110 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005111}
5112
5113/*
5114 * Append string s to buffer buf. Arguments curp and len are the current
5115 * position and remaining length, respectively.
5116 *
 5117 * Return 0 on success, 1 if out of room.
5118 */
5119static int append_str(char *buf, char **curp, int *lenp, const char *s)
5120{
5121 char *p = *curp;
5122 int len = *lenp;
5123 int result = 0; /* success */
5124 char c;
5125
 5126 /* add a comma, if this is not the first string in the buffer */
5127 if (p != buf) {
5128 if (len == 0) {
5129 result = 1; /* out of room */
5130 goto done;
5131 }
5132 *p++ = ',';
5133 len--;
5134 }
5135
5136 /* copy the string */
5137 while ((c = *s++) != 0) {
5138 if (len == 0) {
5139 result = 1; /* out of room */
5140 goto done;
5141 }
5142 *p++ = c;
5143 len--;
5144 }
5145
5146done:
5147 /* write return values */
5148 *curp = p;
5149 *lenp = len;
5150
5151 return result;
5152}
5153
5154/*
5155 * Using the given flag table, print a comma separated string into
5156 * the buffer. End in '*' if the buffer is too short.
5157 */
5158static char *flag_string(char *buf, int buf_len, u64 flags,
5159 struct flag_table *table, int table_size)
5160{
5161 char extra[32];
5162 char *p = buf;
5163 int len = buf_len;
5164 int no_room = 0;
5165 int i;
5166
 5167 /* make sure there are at least 2 bytes so we can form "*" plus a nul */
5168 if (len < 2)
5169 return "";
5170
5171 len--; /* leave room for a nul */
5172 for (i = 0; i < table_size; i++) {
5173 if (flags & table[i].flag) {
5174 no_room = append_str(buf, &p, &len, table[i].str);
5175 if (no_room)
5176 break;
5177 flags &= ~table[i].flag;
5178 }
5179 }
5180
5181 /* any undocumented bits left? */
5182 if (!no_room && flags) {
5183 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5184 no_room = append_str(buf, &p, &len, extra);
5185 }
5186
 5187 /* add '*' if we ran out of room */
5188 if (no_room) {
5189 /* may need to back up to add space for a '*' */
5190 if (len == 0)
5191 --p;
5192 *p++ = '*';
5193 }
5194
5195 /* add final nul - space already allocated above */
5196 *p = 0;
5197 return buf;
5198}
5199
5200/* first 8 CCE error interrupt source names */
5201static const char * const cce_misc_names[] = {
5202 "CceErrInt", /* 0 */
5203 "RxeErrInt", /* 1 */
5204 "MiscErrInt", /* 2 */
5205 "Reserved3", /* 3 */
5206 "PioErrInt", /* 4 */
5207 "SDmaErrInt", /* 5 */
5208 "EgressErrInt", /* 6 */
5209 "TxeErrInt" /* 7 */
5210};
5211
5212/*
5213 * Return the miscellaneous error interrupt name.
5214 */
5215static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5216{
5217 if (source < ARRAY_SIZE(cce_misc_names))
5218 strncpy(buf, cce_misc_names[source], bsize);
5219 else
5220 snprintf(buf,
5221 bsize,
5222 "Reserved%u",
5223 source + IS_GENERAL_ERR_START);
5224
5225 return buf;
5226}
5227
5228/*
5229 * Return the SDMA engine error interrupt name.
5230 */
5231static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5232{
5233 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5234 return buf;
5235}
5236
5237/*
5238 * Return the send context error interrupt name.
5239 */
5240static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5241{
5242 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5243 return buf;
5244}
5245
5246static const char * const various_names[] = {
5247 "PbcInt",
5248 "GpioAssertInt",
5249 "Qsfp1Int",
5250 "Qsfp2Int",
5251 "TCritInt"
5252};
5253
5254/*
5255 * Return the various interrupt name.
5256 */
5257static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5258{
5259 if (source < ARRAY_SIZE(various_names))
5260 strncpy(buf, various_names[source], bsize);
5261 else
 5262 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5263 return buf;
5264}
5265
5266/*
5267 * Return the DC interrupt name.
5268 */
5269static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5270{
5271 static const char * const dc_int_names[] = {
5272 "common",
5273 "lcb",
5274 "8051",
5275 "lbm" /* local block merge */
5276 };
5277
5278 if (source < ARRAY_SIZE(dc_int_names))
5279 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5280 else
5281 snprintf(buf, bsize, "DCInt%u", source);
5282 return buf;
5283}
5284
5285static const char * const sdma_int_names[] = {
5286 "SDmaInt",
5287 "SdmaIdleInt",
5288 "SdmaProgressInt",
5289};
5290
5291/*
5292 * Return the SDMA engine interrupt name.
5293 */
5294static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5295{
5296 /* what interrupt */
5297 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5298 /* which engine */
5299 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5300
5301 if (likely(what < 3))
5302 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5303 else
5304 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5305 return buf;
5306}
5307
5308/*
5309 * Return the receive available interrupt name.
5310 */
5311static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5312{
5313 snprintf(buf, bsize, "RcvAvailInt%u", source);
5314 return buf;
5315}
5316
5317/*
5318 * Return the receive urgent interrupt name.
5319 */
5320static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5321{
5322 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5323 return buf;
5324}
5325
5326/*
5327 * Return the send credit interrupt name.
5328 */
5329static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5330{
5331 snprintf(buf, bsize, "SendCreditInt%u", source);
5332 return buf;
5333}
5334
5335/*
5336 * Return the reserved interrupt name.
5337 */
5338static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5339{
5340 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5341 return buf;
5342}
5343
5344static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5345{
5346 return flag_string(buf, buf_len, flags,
5347 cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5348}
5349
5350static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5351{
5352 return flag_string(buf, buf_len, flags,
5353 rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5354}
5355
5356static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5357{
5358 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5359 ARRAY_SIZE(misc_err_status_flags));
5360}
5361
5362static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5363{
5364 return flag_string(buf, buf_len, flags,
5365 pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5366}
5367
5368static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5369{
5370 return flag_string(buf, buf_len, flags,
5371 sdma_err_status_flags,
5372 ARRAY_SIZE(sdma_err_status_flags));
5373}
5374
5375static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5376{
5377 return flag_string(buf, buf_len, flags,
5378 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5379}
5380
5381static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5382{
5383 return flag_string(buf, buf_len, flags,
5384 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5385}
5386
5387static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5388{
5389 return flag_string(buf, buf_len, flags,
5390 send_err_status_flags,
5391 ARRAY_SIZE(send_err_status_flags));
5392}
5393
5394static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5395{
5396 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005397 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005398
5399 /*
 5400 * For most of these errors, there is nothing that can be done except
5401 * report or record it.
5402 */
5403 dd_dev_info(dd, "CCE Error: %s\n",
5404 cce_err_status_string(buf, sizeof(buf), reg));
5405
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005406 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5407 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005408 /* this error requires a manual drop into SPC freeze mode */
5409 /* then a fix up */
5410 start_freeze_handling(dd->pport, FREEZE_SELF);
5411 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005412
5413 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5414 if (reg & (1ull << i)) {
5415 incr_cntr64(&dd->cce_err_status_cnt[i]);
5416 /* maintain a counter over all cce_err_status errors */
5417 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5418 }
5419 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005420}
5421
5422/*
5423 * Check counters for receive errors that do not have an interrupt
5424 * associated with them.
5425 */
5426#define RCVERR_CHECK_TIME 10
5427static void update_rcverr_timer(unsigned long opaque)
5428{
5429 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5430 struct hfi1_pportdata *ppd = dd->pport;
5431 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5432
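	/* bounce the link if the overflow count grew and the port is set to act on excessive buffer overrun */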
5433 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5434 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5435 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5436 set_link_down_reason(ppd,
5437 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5438 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5439 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5440 }
5441 dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt;
5442
5443 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5444}
5445
5446static int init_rcverr(struct hfi1_devdata *dd)
5447{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305448 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005449 /* Assume the hardware counter has been reset */
5450 dd->rcv_ovfl_cnt = 0;
5451 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5452}
5453
5454static void free_rcverr(struct hfi1_devdata *dd)
5455{
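	/* the timer's .data field doubles as an 'initialized' flag */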
5456 if (dd->rcverr_timer.data)
5457 del_timer_sync(&dd->rcverr_timer);
5458 dd->rcverr_timer.data = 0;
5459}
5460
5461static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5462{
5463 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005464 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005465
5466 dd_dev_info(dd, "Receive Error: %s\n",
5467 rxe_err_status_string(buf, sizeof(buf), reg));
5468
5469 if (reg & ALL_RXE_FREEZE_ERR) {
5470 int flags = 0;
5471
5472 /*
5473 * Freeze mode recovery is disabled for the errors
5474 * in RXE_FREEZE_ABORT_MASK
5475 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005476 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005477 flags = FREEZE_ABORT;
5478
5479 start_freeze_handling(dd->pport, flags);
5480 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005481
5482 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5483 if (reg & (1ull << i))
5484 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5485 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005486}
5487
5488static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5489{
5490 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005491 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005492
5493 dd_dev_info(dd, "Misc Error: %s",
5494 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005495 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5496 if (reg & (1ull << i))
5497 incr_cntr64(&dd->misc_err_status_cnt[i]);
5498 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005499}
5500
5501static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5502{
5503 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005504 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005505
5506 dd_dev_info(dd, "PIO Error: %s\n",
5507 pio_err_status_string(buf, sizeof(buf), reg));
5508
5509 if (reg & ALL_PIO_FREEZE_ERR)
5510 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005511
5512 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5513 if (reg & (1ull << i))
5514 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5515 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005516}
5517
5518static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5519{
5520 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005521 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005522
5523 dd_dev_info(dd, "SDMA Error: %s\n",
5524 sdma_err_status_string(buf, sizeof(buf), reg));
5525
5526 if (reg & ALL_SDMA_FREEZE_ERR)
5527 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005528
5529 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5530 if (reg & (1ull << i))
5531 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5532 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005533}
5534
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005535static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5536{
5537 incr_cntr64(&ppd->port_xmit_discards);
5538}
5539
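/* a 'port inactive' egress error counts as a port transmit discard */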
Mike Marciniszyn77241052015-07-30 15:17:43 -04005540static void count_port_inactive(struct hfi1_devdata *dd)
5541{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005542 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005543}
5544
5545/*
5546 * We have had a "disallowed packet" error during egress. Determine the
5547 * integrity check which failed, and update relevant error counter, etc.
5548 *
5549 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5550 * bit of state per integrity check, and so we can miss the reason for an
5551 * egress error if more than one packet fails the same integrity check
5552 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5553 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005554static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5555 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005556{
5557 struct hfi1_pportdata *ppd = dd->pport;
5558 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5559 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5560 char buf[96];
5561
5562 /* clear down all observed info as quickly as possible after read */
5563 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5564
5565 dd_dev_info(dd,
5566 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5567 info, egress_err_info_string(buf, sizeof(buf), info), src);
5568
5569 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005570 if (info & PORT_DISCARD_EGRESS_ERRS) {
5571 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005572
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005573 /*
5574 * Count all, in case multiple bits are set. Reminder:
5575 * since there is only one info register for many sources,
5576 * these may be attributed to the wrong VL if they occur
5577 * too close together.
5578 */
5579 weight = hweight64(info);
5580 for (i = 0; i < weight; i++) {
5581 __count_port_discards(ppd);
5582 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5583 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5584 else if (vl == 15)
5585 incr_cntr64(&ppd->port_xmit_discards_vl
5586 [C_VL_15]);
5587 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005588 }
5589}
5590
5591/*
5592 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5593 * register. Does it represent a 'port inactive' error?
5594 */
5595static inline int port_inactive_err(u64 posn)
5596{
5597 return (posn >= SEES(TX_LINKDOWN) &&
5598 posn <= SEES(TX_INCORRECT_LINK_STATE));
5599}
5600
5601/*
5602 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5603 * register. Does it represent a 'disallowed packet' error?
5604 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005605static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005606{
5607 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5608 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5609}
5610
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005611/*
5612 * Input value is a bit position of one of the SDMA engine disallowed
5613 * packet errors. Return which engine. Use of this must be guarded by
5614 * disallowed_pkt_err().
5615 */
5616static inline int disallowed_pkt_engine(int posn)
5617{
5618 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5619}
5620
5621/*
 5622 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5623 * be done.
5624 */
5625static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5626{
5627 struct sdma_vl_map *m;
5628 int vl;
5629
5630 /* range check */
5631 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5632 return -1;
5633
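	/* dd->sdma_map is RCU protected; hold the read lock across the lookup */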
5634 rcu_read_lock();
5635 m = rcu_dereference(dd->sdma_map);
5636 vl = m->engine_to_vl[engine];
5637 rcu_read_unlock();
5638
5639 return vl;
5640}
5641
5642/*
 5643 * Translate the send context (software index) into a VL. Return -1 if the
5644 * translation cannot be done.
5645 */
5646static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5647{
5648 struct send_context_info *sci;
5649 struct send_context *sc;
5650 int i;
5651
5652 sci = &dd->send_contexts[sw_index];
5653
5654 /* there is no information for user (PSM) and ack contexts */
5655 if (sci->type != SC_KERNEL)
5656 return -1;
5657
5658 sc = sci->sc;
5659 if (!sc)
5660 return -1;
5661 if (dd->vld[15].sc == sc)
5662 return 15;
5663 for (i = 0; i < num_vls; i++)
5664 if (dd->vld[i].sc == sc)
5665 return i;
5666
5667 return -1;
5668}
5669
Mike Marciniszyn77241052015-07-30 15:17:43 -04005670static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5671{
5672 u64 reg_copy = reg, handled = 0;
5673 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005674 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005675
5676 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5677 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005678 else if (is_ax(dd) &&
5679 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5680 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005681 start_freeze_handling(dd->pport, 0);
5682
5683 while (reg_copy) {
5684 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005685 /* fls64() returns a 1-based offset; we want it zero-based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005686 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005687 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005688
5689 if (port_inactive_err(shift)) {
5690 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005691 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005692 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005693 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5694
5695 handle_send_egress_err_info(dd, vl);
5696 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005697 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005698 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005699 }
5700
5701 reg &= ~handled;
5702
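	/* report any egress error bits that were not handled above */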
5703 if (reg)
5704 dd_dev_info(dd, "Egress Error: %s\n",
5705 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005706
5707 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5708 if (reg & (1ull << i))
5709 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5710 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005711}
5712
5713static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5714{
5715 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005716 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005717
5718 dd_dev_info(dd, "Send Error: %s\n",
5719 send_err_status_string(buf, sizeof(buf), reg));
5720
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005721 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5722 if (reg & (1ull << i))
5723 incr_cntr64(&dd->send_err_status_cnt[i]);
5724 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005725}
5726
5727/*
5728 * The maximum number of times the error clear down will loop before
5729 * blocking a repeating error. This value is arbitrary.
5730 */
5731#define MAX_CLEAR_COUNT 20
5732
5733/*
5734 * Clear and handle an error register. All error interrupts are funneled
5735 * through here to have a central location to correctly handle single-
5736 * or multi-shot errors.
5737 *
5738 * For non per-context registers, call this routine with a context value
5739 * of 0 so the per-context offset is zero.
5740 *
5741 * If the handler loops too many times, assume that something is wrong
5742 * and can't be fixed, so mask the error bits.
5743 */
5744static void interrupt_clear_down(struct hfi1_devdata *dd,
5745 u32 context,
5746 const struct err_reg_info *eri)
5747{
5748 u64 reg;
5749 u32 count;
5750
5751 /* read in a loop until no more errors are seen */
5752 count = 0;
5753 while (1) {
5754 reg = read_kctxt_csr(dd, context, eri->status);
5755 if (reg == 0)
5756 break;
5757 write_kctxt_csr(dd, context, eri->clear, reg);
5758 if (likely(eri->handler))
5759 eri->handler(dd, context, reg);
5760 count++;
5761 if (count > MAX_CLEAR_COUNT) {
5762 u64 mask;
5763
5764 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5765 eri->desc, reg);
5766 /*
5767 * Read-modify-write so any other masked bits
5768 * remain masked.
5769 */
5770 mask = read_kctxt_csr(dd, context, eri->mask);
5771 mask &= ~reg;
5772 write_kctxt_csr(dd, context, eri->mask, mask);
5773 break;
5774 }
5775 }
5776}
5777
5778/*
5779 * CCE block "misc" interrupt. Source is < 16.
5780 */
5781static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5782{
5783 const struct err_reg_info *eri = &misc_errs[source];
5784
5785 if (eri->handler) {
5786 interrupt_clear_down(dd, 0, eri);
5787 } else {
5788 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5789 source);
5790 }
5791}
5792
5793static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5794{
5795 return flag_string(buf, buf_len, flags,
5796 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5797}
5798
5799/*
5800 * Send context error interrupt. Source (hw_context) is < 160.
5801 *
5802 * All send context errors cause the send context to halt. The normal
5803 * clear-down mechanism cannot be used because we cannot clear the
5804 * error bits until several other long-running items are done first.
5805 * This is OK because with the context halted, nothing else is going
5806 * to happen on it anyway.
5807 */
5808static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5809 unsigned int hw_context)
5810{
5811 struct send_context_info *sci;
5812 struct send_context *sc;
5813 char flags[96];
5814 u64 status;
5815 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005816 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005817
5818 sw_index = dd->hw_to_sw[hw_context];
5819 if (sw_index >= dd->num_send_contexts) {
5820 dd_dev_err(dd,
5821 "out of range sw index %u for send context %u\n",
5822 sw_index, hw_context);
5823 return;
5824 }
5825 sci = &dd->send_contexts[sw_index];
5826 sc = sci->sc;
5827 if (!sc) {
5828 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5829 sw_index, hw_context);
5830 return;
5831 }
5832
5833 /* tell the software that a halt has begun */
5834 sc_stop(sc, SCF_HALTED);
5835
5836 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5837
5838 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5839 send_context_err_status_string(flags, sizeof(flags), status));
5840
5841 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005842 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005843
5844 /*
5845 * Automatically restart halted kernel contexts out of interrupt
5846 * context. User contexts must ask the driver to restart the context.
5847 */
5848 if (sc->type != SC_USER)
5849 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005850
5851 /*
5852 * Update the counters for the corresponding status bits.
5853 * Note that these particular counters are aggregated over all
5854 * 160 contexts.
5855 */
5856 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5857 if (status & (1ull << i))
5858 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5859 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005860}
5861
5862static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5863 unsigned int source, u64 status)
5864{
5865 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005866 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005867
5868 sde = &dd->per_sdma[source];
5869#ifdef CONFIG_SDMA_VERBOSITY
5870 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5871 slashstrip(__FILE__), __LINE__, __func__);
5872 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5873 sde->this_idx, source, (unsigned long long)status);
5874#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005875 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005876 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005877
5878 /*
5879 * Update the counters for the corresponding status bits.
5880 * Note that these particular counters are aggregated over
5881 * all 16 DMA engines.
5882 */
5883 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5884 if (status & (1ull << i))
5885 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5886 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005887}
5888
5889/*
5890 * CCE block SDMA error interrupt. Source is < 16.
5891 */
5892static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5893{
5894#ifdef CONFIG_SDMA_VERBOSITY
5895 struct sdma_engine *sde = &dd->per_sdma[source];
5896
5897 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5898 slashstrip(__FILE__), __LINE__, __func__);
5899 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5900 source);
5901 sdma_dumpstate(sde);
5902#endif
5903 interrupt_clear_down(dd, source, &sdma_eng_err);
5904}
5905
5906/*
5907 * CCE block "various" interrupt. Source is < 8.
5908 */
5909static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5910{
5911 const struct err_reg_info *eri = &various_err[source];
5912
5913 /*
5914 * TCritInt cannot go through interrupt_clear_down()
5915 * because it is not a second tier interrupt. The handler
5916 * should be called directly.
5917 */
5918 if (source == TCRIT_INT_SOURCE)
5919 handle_temp_err(dd);
5920 else if (eri->handler)
5921 interrupt_clear_down(dd, 0, eri);
5922 else
5923 dd_dev_info(dd,
5924 "%s: Unimplemented/reserved interrupt %d\n",
5925 __func__, source);
5926}
5927
5928static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5929{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005930 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005931 struct hfi1_pportdata *ppd = dd->pport;
5932 unsigned long flags;
5933 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5934
5935 if (reg & QSFP_HFI0_MODPRST_N) {
5936
5937 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5938 __func__);
5939
5940 if (!qsfp_mod_present(ppd)) {
5941 ppd->driver_link_ready = 0;
5942 /*
5943 * Cable removed, reset all our information about the
5944 * cache and cable capabilities
5945 */
5946
5947 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5948 /*
5949 * We don't set cache_refresh_required here as we expect
5950 * an interrupt when a cable is inserted
5951 */
5952 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005953 ppd->qsfp_info.reset_needed = 0;
5954 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005955 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5956 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005957 /* Invert the ModPresent pin now to detect plug-in */
5958 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5959 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005960
5961 if ((ppd->offline_disabled_reason >
5962 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08005963 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08005964 (ppd->offline_disabled_reason ==
5965 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5966 ppd->offline_disabled_reason =
5967 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08005968 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005969
Mike Marciniszyn77241052015-07-30 15:17:43 -04005970 if (ppd->host_link_state == HLS_DN_POLL) {
5971 /*
5972 * The link is still in POLL. This means
5973 * that the normal link down processing
5974 * will not happen. We have to do it here
5975 * before turning the DC off.
5976 */
5977 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5978 }
5979 } else {
5980 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5981 ppd->qsfp_info.cache_valid = 0;
5982 ppd->qsfp_info.cache_refresh_required = 1;
5983 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5984 flags);
5985
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005986 /*
5987 * Stop inversion of ModPresent pin to detect
5988 * removal of the cable
5989 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005990 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005991 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5992 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5993
5994 ppd->offline_disabled_reason =
5995 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005996 }
5997 }
5998
5999 if (reg & QSFP_HFI0_INT_N) {
6000
6001 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
6002 __func__);
6003 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6004 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006005 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6006 }
6007
6008 /* Schedule the QSFP work only if there is a cable attached. */
6009 if (qsfp_mod_present(ppd))
6010 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6011}
6012
6013static int request_host_lcb_access(struct hfi1_devdata *dd)
6014{
6015 int ret;
6016
6017 ret = do_8051_command(dd, HCMD_MISC,
6018 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
6019 NULL);
6020 if (ret != HCMD_SUCCESS) {
6021 dd_dev_err(dd, "%s: command failed with error %d\n",
6022 __func__, ret);
6023 }
6024 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6025}
6026
6027static int request_8051_lcb_access(struct hfi1_devdata *dd)
6028{
6029 int ret;
6030
6031 ret = do_8051_command(dd, HCMD_MISC,
6032 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
6033 NULL);
6034 if (ret != HCMD_SUCCESS) {
6035 dd_dev_err(dd, "%s: command failed with error %d\n",
6036 __func__, ret);
6037 }
6038 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6039}
6040
6041/*
6042 * Set the LCB selector - allow host access. The DCC selector always
6043 * points to the host.
6044 */
6045static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6046{
6047 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6048 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
6049 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6050}
6051
6052/*
6053 * Clear the LCB selector - allow 8051 access. The DCC selector always
6054 * points to the host.
6055 */
6056static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6057{
6058 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6059 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6060}
6061
6062/*
6063 * Acquire LCB access from the 8051. If the host already has access,
6064 * just increment a counter. Otherwise, inform the 8051 that the
6065 * host is taking access.
6066 *
6067 * Returns:
6068 * 0 on success
6069 * -EBUSY if the 8051 has control and cannot be disturbed
6070 * -errno if unable to acquire access from the 8051
6071 */
6072int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6073{
6074 struct hfi1_pportdata *ppd = dd->pport;
6075 int ret = 0;
6076
6077 /*
6078 * Use the host link state lock so the operation of this routine
6079 * { link state check, selector change, count increment } can occur
6080 * as a unit against a link state change. Otherwise there is a
6081 * race between the state change and the count increment.
6082 */
6083 if (sleep_ok) {
6084 mutex_lock(&ppd->hls_lock);
6085 } else {
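		/* caller cannot sleep; poll for the mutex instead of blocking */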
Dan Carpenter951842b2015-09-16 09:22:51 +03006086 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006087 udelay(1);
6088 }
6089
6090 /* this access is valid only when the link is up */
6091 if ((ppd->host_link_state & HLS_UP) == 0) {
6092 dd_dev_info(dd, "%s: link state %s not up\n",
6093 __func__, link_state_name(ppd->host_link_state));
6094 ret = -EBUSY;
6095 goto done;
6096 }
6097
6098 if (dd->lcb_access_count == 0) {
6099 ret = request_host_lcb_access(dd);
6100 if (ret) {
6101 dd_dev_err(dd,
6102 "%s: unable to acquire LCB access, err %d\n",
6103 __func__, ret);
6104 goto done;
6105 }
6106 set_host_lcb_access(dd);
6107 }
6108 dd->lcb_access_count++;
6109done:
6110 mutex_unlock(&ppd->hls_lock);
6111 return ret;
6112}
6113
6114/*
6115 * Release LCB access by decrementing the use count. If the count is moving
6116 * from 1 to 0, inform 8051 that it has control back.
6117 *
6118 * Returns:
6119 * 0 on success
6120 * -errno if unable to release access to the 8051
6121 */
6122int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6123{
6124 int ret = 0;
6125
6126 /*
6127 * Use the host link state lock because the acquire needed it.
6128 * Here, we only need to keep { selector change, count decrement }
6129 * as a unit.
6130 */
6131 if (sleep_ok) {
6132 mutex_lock(&dd->pport->hls_lock);
6133 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006134 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006135 udelay(1);
6136 }
6137
6138 if (dd->lcb_access_count == 0) {
6139 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6140 __func__);
6141 goto done;
6142 }
6143
6144 if (dd->lcb_access_count == 1) {
6145 set_8051_lcb_access(dd);
6146 ret = request_8051_lcb_access(dd);
6147 if (ret) {
6148 dd_dev_err(dd,
6149 "%s: unable to release LCB access, err %d\n",
6150 __func__, ret);
6151 /* restore host access if the grant didn't work */
6152 set_host_lcb_access(dd);
6153 goto done;
6154 }
6155 }
6156 dd->lcb_access_count--;
6157done:
6158 mutex_unlock(&dd->pport->hls_lock);
6159 return ret;
6160}
6161
6162/*
6163 * Initialize LCB access variables and state. Called during driver load,
6164 * after most of the initialization is finished.
6165 *
6166 * The DC default is LCB access on for the host. The driver defaults to
6167 * leaving access to the 8051. Assign access now - this constrains the call
6168 * to this routine to be after all LCB set-up is done. In particular, after
 6169 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6170 */
6171static void init_lcb_access(struct hfi1_devdata *dd)
6172{
6173 dd->lcb_access_count = 0;
6174}
6175
6176/*
6177 * Write a response back to a 8051 request.
6178 */
6179static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6180{
6181 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6182 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6183 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6184 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6185}
6186
6187/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006188 * Handle host requests from the 8051.
6189 *
6190 * This is a work-queue function outside of the interrupt.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006191 */
Easwar Hariharancbac3862016-02-03 14:31:31 -08006192void handle_8051_request(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006193{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006194 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6195 dc_host_req_work);
6196 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006197 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006198 u16 data = 0;
6199 u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
6200 u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
Mike Marciniszyn77241052015-07-30 15:17:43 -04006201
6202 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6203 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6204 return; /* no request */
6205
6206 /* zero out COMPLETED so the response is seen */
6207 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6208
6209 /* extract request details */
6210 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6211 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6212 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6213 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6214
6215 switch (type) {
6216 case HREQ_LOAD_CONFIG:
6217 case HREQ_SAVE_CONFIG:
6218 case HREQ_READ_CONFIG:
6219 case HREQ_SET_TX_EQ_ABS:
6220 case HREQ_SET_TX_EQ_REL:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006221 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6222 type);
6223 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6224 break;
6225
Easwar Hariharancbac3862016-02-03 14:31:31 -08006226 case HREQ_ENABLE:
6227 lanes = data & 0xF;
6228 for (i = 0; lanes; lanes >>= 1, i++) {
6229 if (!(lanes & 1))
6230 continue;
6231 if (data & 0x200) {
6232 /* enable TX CDR */
6233 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6234 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6235 cdr_ctrl_byte |= (1 << (i + 4));
6236 } else {
6237 /* disable TX CDR */
6238 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6239 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6240 cdr_ctrl_byte &= ~(1 << (i + 4));
6241 }
6242
6243 if (data & 0x800) {
6244 /* enable RX CDR */
6245 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6246 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6247 cdr_ctrl_byte |= (1 << i);
6248 } else {
6249 /* disable RX CDR */
6250 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6251 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6252 cdr_ctrl_byte &= ~(1 << i);
6253 }
6254 }
6255 qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
6256 &cdr_ctrl_byte, 1);
6257 hreq_response(dd, HREQ_SUCCESS, data);
6258 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
6259 break;
6260
Mike Marciniszyn77241052015-07-30 15:17:43 -04006261 case HREQ_CONFIG_DONE:
6262 hreq_response(dd, HREQ_SUCCESS, 0);
6263 break;
6264
6265 case HREQ_INTERFACE_TEST:
6266 hreq_response(dd, HREQ_SUCCESS, data);
6267 break;
6268
6269 default:
6270 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6271 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6272 break;
6273 }
6274}
6275
6276static void write_global_credit(struct hfi1_devdata *dd,
6277 u8 vau, u16 total, u16 shared)
6278{
6279 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6280 ((u64)total
6281 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6282 | ((u64)shared
6283 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6284 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6285}
6286
6287/*
6288 * Set up initial VL15 credits of the remote. Assumes the rest of
6289 * the CM credit registers are zero from a previous global or credit reset.
6290 */
6291void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6292{
6293 /* leave shared count at zero for both global and VL15 */
6294 write_global_credit(dd, vau, vl15buf, 0);
6295
6296 /* We may need some credits for another VL when sending packets
6297 * with the snoop interface. Dividing it down the middle for VL15
6298 * and VL0 should suffice.
6299 */
6300 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6301 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6302 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6303 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6304 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6305 } else {
6306 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6307 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6308 }
6309}
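
/*
 * Worked example for set_up_vl15() (the numbers are hypothetical): with
 * vau = 1 and vl15buf = 0x40, the normal path gives all 0x40 dedicated
 * credits to VL15.  In snoop mode the same 0x40 is split down the middle:
 * 0x20 dedicated credits each for VL15 and VL0 (vl15buf >> 1).
 */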
6310
6311/*
6312 * Zero all credit details from the previous connection and
6313 * reset the CM manager's internal counters.
6314 */
6315void reset_link_credits(struct hfi1_devdata *dd)
6316{
6317 int i;
6318
6319 /* remove all previous VL credit limits */
6320 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6321 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
6322 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6323 write_global_credit(dd, 0, 0, 0);
6324 /* reset the CM block */
6325 pio_send_control(dd, PSC_CM_RESET);
6326}
6327
6328/* convert a vCU to a CU */
6329static u32 vcu_to_cu(u8 vcu)
6330{
6331 return 1 << vcu;
6332}
6333
6334/* convert a CU to a vCU */
6335static u8 cu_to_vcu(u32 cu)
6336{
6337 return ilog2(cu);
6338}
6339
6340/* convert a vAU to an AU */
6341static u32 vau_to_au(u8 vau)
6342{
6343 return 8 * (1 << vau);
6344}
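
/*
 * Worked example of the virtual/actual unit encodings above (the
 * arithmetic follows directly from the helpers as written): a vCU of 2 is
 * a CU of 1 << 2 = 4, and cu_to_vcu(4) = ilog2(4) = 2 round-trips back;
 * a vAU of 3 is an AU of 8 * (1 << 3) = 64 bytes.
 */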
6345
6346static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6347{
6348 ppd->sm_trap_qp = 0x0;
6349 ppd->sa_qp = 0x1;
6350}
6351
6352/*
6353 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6354 */
6355static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6356{
6357 u64 reg;
6358
6359 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6360 write_csr(dd, DC_LCB_CFG_RUN, 0);
6361 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6362 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6363 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6364 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6365 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6366 reg = read_csr(dd, DCC_CFG_RESET);
6367 write_csr(dd, DCC_CFG_RESET,
6368 reg
6369 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6370 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6371 (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6372 if (!abort) {
6373 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6374 write_csr(dd, DCC_CFG_RESET, reg);
6375 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6376 }
6377}
6378
6379/*
6380 * This routine should be called after the link has been transitioned to
6381 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6382 * reset).
6383 *
6384 * The expectation is that the caller of this routine would have taken
6385 * care of properly transitioning the link into the correct state.
6386 */
6387static void dc_shutdown(struct hfi1_devdata *dd)
6388{
6389 unsigned long flags;
6390
6391 spin_lock_irqsave(&dd->dc8051_lock, flags);
6392 if (dd->dc_shutdown) {
6393 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6394 return;
6395 }
6396 dd->dc_shutdown = 1;
6397 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6398 /* Shutdown the LCB */
6399 lcb_shutdown(dd, 1);
6400	/* Going to OFFLINE would have caused the 8051 to put the
6401	 * SerDes into reset already. Just need to shut down the 8051
6402	 * itself. */
6403 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6404}
6405
6406/* Calling this after the DC has been brought out of reset should not
6407 * do any damage. */
6408static void dc_start(struct hfi1_devdata *dd)
6409{
6410 unsigned long flags;
6411 int ret;
6412
6413 spin_lock_irqsave(&dd->dc8051_lock, flags);
6414 if (!dd->dc_shutdown)
6415 goto done;
6416 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6417 /* Take the 8051 out of reset */
6418 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6419 /* Wait until 8051 is ready */
6420 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6421 if (ret) {
6422 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6423 __func__);
6424 }
6425 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6426 write_csr(dd, DCC_CFG_RESET, 0x10);
6427 /* lcb_shutdown() with abort=1 does not restore these */
6428 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6429 spin_lock_irqsave(&dd->dc8051_lock, flags);
6430 dd->dc_shutdown = 0;
6431done:
6432 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6433}
6434
6435/*
6436 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6437 */
6438static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6439{
6440 u64 rx_radr, tx_radr;
6441 u32 version;
6442
6443 if (dd->icode != ICODE_FPGA_EMULATION)
6444 return;
6445
6446 /*
6447 * These LCB defaults on emulator _s are good, nothing to do here:
6448 * LCB_CFG_TX_FIFOS_RADR
6449 * LCB_CFG_RX_FIFOS_RADR
6450 * LCB_CFG_LN_DCLK
6451 * LCB_CFG_IGNORE_LOST_RCLK
6452 */
6453 if (is_emulator_s(dd))
6454 return;
6455 /* else this is _p */
6456
6457 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006458 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006459 version = 0x2d; /* all B0 use 0x2d or higher settings */
6460
6461 if (version <= 0x12) {
6462 /* release 0x12 and below */
6463
6464 /*
6465 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6466 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6467 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6468 */
6469 rx_radr =
6470 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6471 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6472 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6473 /*
6474 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6475 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6476 */
6477 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6478 } else if (version <= 0x18) {
6479 /* release 0x13 up to 0x18 */
6480 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6481 rx_radr =
6482 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6483 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6484 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6485 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6486 } else if (version == 0x19) {
6487 /* release 0x19 */
6488 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6489 rx_radr =
6490 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6491 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6492 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6493 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6494 } else if (version == 0x1a) {
6495 /* release 0x1a */
6496 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6497 rx_radr =
6498 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6499 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6500 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6501 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6502 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6503 } else {
6504 /* release 0x1b and higher */
6505 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6506 rx_radr =
6507 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6508 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6509 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6510 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6511 }
6512
6513 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6514 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6515 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6516 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6517 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6518}
6519
6520/*
6521 * Handle a SMA idle message
6522 *
6523 * This is a work-queue function outside of the interrupt.
6524 */
6525void handle_sma_message(struct work_struct *work)
6526{
6527 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6528 sma_message_work);
6529 struct hfi1_devdata *dd = ppd->dd;
6530 u64 msg;
6531 int ret;
6532
6533	/* msg is bytes 1-4 of the 40-bit idle message - the command code
6534	 * is stripped off */
6535 ret = read_idle_sma(dd, &msg);
6536 if (ret)
6537 return;
6538 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6539 /*
6540 * React to the SMA message. Byte[1] (0 for us) is the command.
6541 */
6542 switch (msg & 0xff) {
6543 case SMA_IDLE_ARM:
6544 /*
6545 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6546 * State Transitions
6547 *
6548 * Only expected in INIT or ARMED, discard otherwise.
6549 */
6550 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6551 ppd->neighbor_normal = 1;
6552 break;
6553 case SMA_IDLE_ACTIVE:
6554 /*
6555 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6556 * State Transitions
6557 *
6558 * Can activate the node. Discard otherwise.
6559 */
6560 if (ppd->host_link_state == HLS_UP_ARMED
6561 && ppd->is_active_optimize_enabled) {
6562 ppd->neighbor_normal = 1;
6563 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6564 if (ret)
6565 dd_dev_err(
6566 dd,
6567 "%s: received Active SMA idle message, couldn't set link to Active\n",
6568 __func__);
6569 }
6570 break;
6571 default:
6572 dd_dev_err(dd,
6573 "%s: received unexpected SMA idle message 0x%llx\n",
6574 __func__, msg);
6575 break;
6576 }
6577}
6578
6579static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6580{
6581 u64 rcvctrl;
6582 unsigned long flags;
6583
6584 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6585 rcvctrl = read_csr(dd, RCV_CTRL);
6586 rcvctrl |= add;
6587 rcvctrl &= ~clear;
6588 write_csr(dd, RCV_CTRL, rcvctrl);
6589 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6590}
6591
6592static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6593{
6594 adjust_rcvctrl(dd, add, 0);
6595}
6596
6597static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6598{
6599 adjust_rcvctrl(dd, 0, clear);
6600}
6601
6602/*
6603 * Called from all interrupt handlers to start handling an SPC freeze.
6604 */
6605void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6606{
6607 struct hfi1_devdata *dd = ppd->dd;
6608 struct send_context *sc;
6609 int i;
6610
6611 if (flags & FREEZE_SELF)
6612 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6613
6614 /* enter frozen mode */
6615 dd->flags |= HFI1_FROZEN;
6616
6617 /* notify all SDMA engines that they are going into a freeze */
6618 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6619
6620 /* do halt pre-handling on all enabled send contexts */
6621 for (i = 0; i < dd->num_send_contexts; i++) {
6622 sc = dd->send_contexts[i].sc;
6623 if (sc && (sc->flags & SCF_ENABLED))
6624 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6625 }
6626
6627	/* Send contexts are frozen. Notify user space */
6628 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6629
6630 if (flags & FREEZE_ABORT) {
6631 dd_dev_err(dd,
6632 "Aborted freeze recovery. Please REBOOT system\n");
6633 return;
6634 }
6635 /* queue non-interrupt handler */
6636 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6637}
6638
6639/*
6640 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6641 * depending on the "freeze" parameter.
6642 *
6643 * No need to return an error if it times out, our only option
6644 * is to proceed anyway.
6645 */
6646static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6647{
6648 unsigned long timeout;
6649 u64 reg;
6650
6651 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6652 while (1) {
6653 reg = read_csr(dd, CCE_STATUS);
6654 if (freeze) {
6655 /* waiting until all indicators are set */
6656 if ((reg & ALL_FROZE) == ALL_FROZE)
6657 return; /* all done */
6658 } else {
6659 /* waiting until all indicators are clear */
6660 if ((reg & ALL_FROZE) == 0)
6661 return; /* all done */
6662 }
6663
6664 if (time_after(jiffies, timeout)) {
6665 dd_dev_err(dd,
6666 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6667 freeze ? "" : "un",
6668 reg & ALL_FROZE,
6669 freeze ? ALL_FROZE : 0ull);
6670 return;
6671 }
6672 usleep_range(80, 120);
6673 }
6674}
6675
6676/*
6677 * Do all freeze handling for the RXE block.
6678 */
6679static void rxe_freeze(struct hfi1_devdata *dd)
6680{
6681 int i;
6682
6683 /* disable port */
6684 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6685
6686 /* disable all receive contexts */
6687 for (i = 0; i < dd->num_rcv_contexts; i++)
6688 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6689}
6690
6691/*
6692 * Unfreeze handling for the RXE block - kernel contexts only.
6693 * This will also enable the port. User contexts will do unfreeze
6694 * handling on a per-context basis as they call into the driver.
6695 *
6696 */
6697static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6698{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006699 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006700 int i;
6701
6702 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006703 for (i = 0; i < dd->n_krcv_queues; i++) {
6704 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6705 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6706 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6707 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6708 hfi1_rcvctrl(dd, rcvmask, i);
6709 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006710
6711 /* enable port */
6712 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6713}
6714
6715/*
6716 * Non-interrupt SPC freeze handling.
6717 *
6718 * This is a work-queue function outside of the triggering interrupt.
6719 */
6720void handle_freeze(struct work_struct *work)
6721{
6722 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6723 freeze_work);
6724 struct hfi1_devdata *dd = ppd->dd;
6725
6726 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006727 wait_for_freeze_status(dd, 1);
6728
6729 /* SPC is now frozen */
6730
6731 /* do send PIO freeze steps */
6732 pio_freeze(dd);
6733
6734 /* do send DMA freeze steps */
6735 sdma_freeze(dd);
6736
6737 /* do send egress freeze steps - nothing to do */
6738
6739 /* do receive freeze steps */
6740 rxe_freeze(dd);
6741
6742 /*
6743 * Unfreeze the hardware - clear the freeze, wait for each
6744 * block's frozen bit to clear, then clear the frozen flag.
6745 */
6746 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6747 wait_for_freeze_status(dd, 0);
6748
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006749 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006750 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6751 wait_for_freeze_status(dd, 1);
6752 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6753 wait_for_freeze_status(dd, 0);
6754 }
6755
6756 /* do send PIO unfreeze steps for kernel contexts */
6757 pio_kernel_unfreeze(dd);
6758
6759 /* do send DMA unfreeze steps */
6760 sdma_unfreeze(dd);
6761
6762 /* do send egress unfreeze steps - nothing to do */
6763
6764 /* do receive unfreeze steps for kernel contexts */
6765 rxe_kernel_unfreeze(dd);
6766
6767 /*
6768 * The unfreeze procedure touches global device registers when
6769 * it disables and re-enables RXE. Mark the device unfrozen
6770 * after all that is done so other parts of the driver waiting
6771 * for the device to unfreeze don't do things out of order.
6772 *
6773 * The above implies that the meaning of HFI1_FROZEN flag is
6774 * "Device has gone into freeze mode and freeze mode handling
6775 * is still in progress."
6776 *
6777 * The flag will be removed when freeze mode processing has
6778 * completed.
6779 */
6780 dd->flags &= ~HFI1_FROZEN;
6781 wake_up(&dd->event_queue);
6782
6783 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006784}
6785
6786/*
6787 * Handle a link up interrupt from the 8051.
6788 *
6789 * This is a work-queue function outside of the interrupt.
6790 */
6791void handle_link_up(struct work_struct *work)
6792{
6793 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6794 link_up_work);
6795 set_link_state(ppd, HLS_UP_INIT);
6796
6797 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6798 read_ltp_rtt(ppd->dd);
6799 /*
6800 * OPA specifies that certain counters are cleared on a transition
6801 * to link up, so do that.
6802 */
6803 clear_linkup_counters(ppd->dd);
6804 /*
6805 * And (re)set link up default values.
6806 */
6807 set_linkup_defaults(ppd);
6808
6809 /* enforce link speed enabled */
6810 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6811 /* oops - current speed is not enabled, bounce */
6812 dd_dev_err(ppd->dd,
6813 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6814 ppd->link_speed_active, ppd->link_speed_enabled);
6815 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6816 OPA_LINKDOWN_REASON_SPEED_POLICY);
6817 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006818 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006819 start_link(ppd);
6820 }
6821}
6822
6823/* Several pieces of LNI information were cached for SMA in ppd.
6824 * Reset these on link down */
6825static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6826{
6827 ppd->neighbor_guid = 0;
6828 ppd->neighbor_port_number = 0;
6829 ppd->neighbor_type = 0;
6830 ppd->neighbor_fm_security = 0;
6831}
6832
6833/*
6834 * Handle a link down interrupt from the 8051.
6835 *
6836 * This is a work-queue function outside of the interrupt.
6837 */
6838void handle_link_down(struct work_struct *work)
6839{
6840 u8 lcl_reason, neigh_reason = 0;
6841 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6842 link_down_work);
6843
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006844 if ((ppd->host_link_state &
6845 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6846 ppd->port_type == PORT_TYPE_FIXED)
6847 ppd->offline_disabled_reason =
6848 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6849
6850 /* Go offline first, then deal with reading/writing through 8051 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006851 set_link_state(ppd, HLS_DN_OFFLINE);
6852
6853 lcl_reason = 0;
6854 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6855
6856 /*
6857 * If no reason, assume peer-initiated but missed
6858 * LinkGoingDown idle flits.
6859 */
6860 if (neigh_reason == 0)
6861 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6862
6863 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6864
6865 reset_neighbor_info(ppd);
6866
6867 /* disable the port */
6868 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6869
6870 /* If there is no cable attached, turn the DC off. Otherwise,
6871	 * start the link bring-up. */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006872 if (!qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006873 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006874 } else {
6875 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006876 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006877 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006878}
6879
6880void handle_link_bounce(struct work_struct *work)
6881{
6882 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6883 link_bounce_work);
6884
6885 /*
6886 * Only do something if the link is currently up.
6887 */
6888 if (ppd->host_link_state & HLS_UP) {
6889 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006890 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006891 start_link(ppd);
6892 } else {
6893 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6894 __func__, link_state_name(ppd->host_link_state));
6895 }
6896}
6897
6898/*
6899 * Mask conversion: Capability exchange to Port LTP. The capability
6900 * exchange has an implicit 16b CRC that is mandatory.
6901 */
6902static int cap_to_port_ltp(int cap)
6903{
6904 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6905
6906 if (cap & CAP_CRC_14B)
6907 port_ltp |= PORT_LTP_CRC_MODE_14;
6908 if (cap & CAP_CRC_48B)
6909 port_ltp |= PORT_LTP_CRC_MODE_48;
6910 if (cap & CAP_CRC_12B_16B_PER_LANE)
6911 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6912
6913 return port_ltp;
6914}
6915
6916/*
6917 * Convert an OPA Port LTP mask to capability mask
6918 */
6919int port_ltp_to_cap(int port_ltp)
6920{
6921 int cap_mask = 0;
6922
6923 if (port_ltp & PORT_LTP_CRC_MODE_14)
6924 cap_mask |= CAP_CRC_14B;
6925 if (port_ltp & PORT_LTP_CRC_MODE_48)
6926 cap_mask |= CAP_CRC_48B;
6927 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6928 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6929
6930 return cap_mask;
6931}
6932
6933/*
6934 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6935 */
6936static int lcb_to_port_ltp(int lcb_crc)
6937{
6938 int port_ltp = 0;
6939
6940 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6941 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6942 else if (lcb_crc == LCB_CRC_48B)
6943 port_ltp = PORT_LTP_CRC_MODE_48;
6944 else if (lcb_crc == LCB_CRC_14B)
6945 port_ltp = PORT_LTP_CRC_MODE_14;
6946 else
6947 port_ltp = PORT_LTP_CRC_MODE_16;
6948
6949 return port_ltp;
6950}
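
/*
 * Worked example for the three mask conversions above (values follow from
 * the code, not from the OPA spec text): cap_to_port_ltp(CAP_CRC_14B)
 * returns PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14, since the 16b mode
 * is always included; port_ltp_to_cap() of that result gives back only
 * CAP_CRC_14B, because the mandatory 16b mode has no capability bit; and
 * lcb_to_port_ltp(LCB_CRC_14B) is PORT_LTP_CRC_MODE_14 alone, since the
 * LCB runs exactly one CRC mode at a time.
 */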
6951
6952/*
6953 * Our neighbor has indicated that we are allowed to act as a fabric
6954 * manager, so place the full management partition key in the second
6955 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6956 * that we should already have the limited management partition key in
6957 * array element 1, and also that the port is not yet up when
6958 * add_full_mgmt_pkey() is invoked.
6959 */
6960static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6961{
6962 struct hfi1_devdata *dd = ppd->dd;
6963
Dean Luick87645222015-12-01 15:38:21 -05006964	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6965 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6966 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6967 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006968 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6969 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6970}
6971
6972/*
6973 * Convert the given link width to the OPA link width bitmask.
6974 */
6975static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6976{
6977 switch (width) {
6978 case 0:
6979 /*
6980 * Simulator and quick linkup do not set the width.
6981 * Just set it to 4x without complaint.
6982 */
6983 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6984 return OPA_LINK_WIDTH_4X;
6985 return 0; /* no lanes up */
6986 case 1: return OPA_LINK_WIDTH_1X;
6987 case 2: return OPA_LINK_WIDTH_2X;
6988 case 3: return OPA_LINK_WIDTH_3X;
6989 default:
6990 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6991 __func__, width);
6992 /* fall through */
6993 case 4: return OPA_LINK_WIDTH_4X;
6994 }
6995}
6996
6997/*
6998 * Do a population count on the bottom nibble.
6999 */
7000static const u8 bit_counts[16] = {
7001 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7002};
7003static inline u8 nibble_to_count(u8 nibble)
7004{
7005 return bit_counts[nibble & 0xf];
7006}
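
/*
 * Worked example: enable_lane_tx = 0xb (binary 1011) means lanes 0, 1 and
 * 3 are active, so nibble_to_count(0xb) = bit_counts[0xb] = 3.  Only the
 * low nibble is examined; nibble_to_count(0xfb) also returns 3.
 */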
7007
7008/*
7009 * Read the active lane information from the 8051 registers and return
7010 * their widths.
7011 *
7012 * Active lane information is found in these 8051 registers:
7013 * enable_lane_tx
7014 * enable_lane_rx
7015 */
7016static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7017 u16 *rx_width)
7018{
7019 u16 tx, rx;
7020 u8 enable_lane_rx;
7021 u8 enable_lane_tx;
7022 u8 tx_polarity_inversion;
7023 u8 rx_polarity_inversion;
7024 u8 max_rate;
7025
7026 /* read the active lanes */
7027 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7028 &rx_polarity_inversion, &max_rate);
7029 read_local_lni(dd, &enable_lane_rx);
7030
7031 /* convert to counts */
7032 tx = nibble_to_count(enable_lane_tx);
7033 rx = nibble_to_count(enable_lane_rx);
7034
7035 /*
7036 * Set link_speed_active here, overriding what was set in
7037 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7038 * set the max_rate field in handle_verify_cap until v0.19.
7039 */
7040 if ((dd->icode == ICODE_RTL_SILICON)
7041 && (dd->dc8051_ver < dc8051_ver(0, 19))) {
7042 /* max_rate: 0 = 12.5G, 1 = 25G */
7043 switch (max_rate) {
7044 case 0:
7045 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7046 break;
7047 default:
7048 dd_dev_err(dd,
7049 "%s: unexpected max rate %d, using 25Gb\n",
7050 __func__, (int)max_rate);
7051 /* fall through */
7052 case 1:
7053 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7054 break;
7055 }
7056 }
7057
7058 dd_dev_info(dd,
7059 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7060 enable_lane_tx, tx, enable_lane_rx, rx);
7061 *tx_width = link_width_to_bits(dd, tx);
7062 *rx_width = link_width_to_bits(dd, rx);
7063}
7064
7065/*
7066 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7067 * Valid after the end of VerifyCap and during LinkUp. Does not change
7068 * after link up. I.e. look elsewhere for downgrade information.
7069 *
7070 * Bits are:
7071 * + bits [7:4] contain the number of active transmitters
7072 * + bits [3:0] contain the number of active receivers
7073 * These are numbers 1 through 4 and can be different values if the
7074 * link is asymmetric.
7075 *
7076 * verify_cap_local_fm_link_width[0] retains its original value.
7077 */
7078static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7079 u16 *rx_width)
7080{
7081 u16 widths, tx, rx;
7082 u8 misc_bits, local_flags;
7083 u16 active_tx, active_rx;
7084
7085 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7086 tx = widths >> 12;
7087 rx = (widths >> 8) & 0xf;
7088
7089 *tx_width = link_width_to_bits(dd, tx);
7090 *rx_width = link_width_to_bits(dd, rx);
7091
7092 /* print the active widths */
7093 get_link_widths(dd, &active_tx, &active_rx);
7094}
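
/*
 * Worked example of the decode above (the register value is hypothetical):
 * if verify_cap_local_fm_link_width[1] reads back as 0x4400, then
 * tx = 0x4400 >> 12 = 4 and rx = (0x4400 >> 8) & 0xf = 4, and
 * link_width_to_bits() maps both to OPA_LINK_WIDTH_4X.  An asymmetric
 * value such as 0x4200 would decode to 4 transmitters and 2 receivers.
 */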
7095
7096/*
7097 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7098 * hardware information when the link first comes up.
7099 *
7100 * The link width is not available until after VerifyCap.AllFramesReceived
7101 * (the trigger for handle_verify_cap), so this is outside that routine
7102 * and should be called when the 8051 signals linkup.
7103 */
7104void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7105{
7106 u16 tx_width, rx_width;
7107
7108 /* get end-of-LNI link widths */
7109 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7110
7111 /* use tx_width as the link is supposed to be symmetric on link up */
7112 ppd->link_width_active = tx_width;
7113 /* link width downgrade active (LWD.A) starts out matching LW.A */
7114 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7115 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7116 /* per OPA spec, on link up LWD.E resets to LWD.S */
7117 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7118	/* cache the active egress rate (units [10^6 bits/sec]) */
7119 ppd->current_egress_rate = active_egress_rate(ppd);
7120}
7121
7122/*
7123 * Handle a verify capabilities interrupt from the 8051.
7124 *
7125 * This is a work-queue function outside of the interrupt.
7126 */
7127void handle_verify_cap(struct work_struct *work)
7128{
7129 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7130 link_vc_work);
7131 struct hfi1_devdata *dd = ppd->dd;
7132 u64 reg;
7133 u8 power_management;
7134	u8 continuous;
7135 u8 vcu;
7136 u8 vau;
7137 u8 z;
7138 u16 vl15buf;
7139 u16 link_widths;
7140 u16 crc_mask;
7141 u16 crc_val;
7142 u16 device_id;
7143 u16 active_tx, active_rx;
7144 u8 partner_supported_crc;
7145 u8 remote_tx_rate;
7146 u8 device_rev;
7147
7148 set_link_state(ppd, HLS_VERIFY_CAP);
7149
7150 lcb_shutdown(dd, 0);
7151 adjust_lcb_for_fpga_serdes(dd);
7152
7153 /*
7154 * These are now valid:
7155 * remote VerifyCap fields in the general LNI config
7156 * CSR DC8051_STS_REMOTE_GUID
7157 * CSR DC8051_STS_REMOTE_NODE_TYPE
7158 * CSR DC8051_STS_REMOTE_FM_SECURITY
7159 * CSR DC8051_STS_REMOTE_PORT_NO
7160 */
7161
7162	read_vc_remote_phy(dd, &power_management, &continuous);
7163 read_vc_remote_fabric(
7164 dd,
7165 &vau,
7166 &z,
7167 &vcu,
7168 &vl15buf,
7169 &partner_supported_crc);
7170 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7171 read_remote_device_id(dd, &device_id, &device_rev);
7172 /*
7173 * And the 'MgmtAllowed' information, which is exchanged during
7174	 * LNI, is also available at this point.
7175 */
7176 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7177 /* print the active widths */
7178 get_link_widths(dd, &active_tx, &active_rx);
7179 dd_dev_info(dd,
7180 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7181		    (int)power_management, (int)continuous);
7182 dd_dev_info(dd,
7183 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7184 (int)vau,
7185 (int)z,
7186 (int)vcu,
7187 (int)vl15buf,
7188 (int)partner_supported_crc);
7189 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7190 (u32)remote_tx_rate, (u32)link_widths);
7191 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7192 (u32)device_id, (u32)device_rev);
7193 /*
7194 * The peer vAU value just read is the peer receiver value. HFI does
7195 * not support a transmit vAU of 0 (AU == 8). We advertised that
7196 * with Z=1 in the fabric capabilities sent to the peer. The peer
7197 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7198 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7199 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7200 * subject to the Z value exception.
7201 */
7202 if (vau == 0)
7203 vau = 1;
7204 set_up_vl15(dd, vau, vl15buf);
7205
7206 /* set up the LCB CRC mode */
7207 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7208
7209 /* order is important: use the lowest bit in common */
7210 if (crc_mask & CAP_CRC_14B)
7211 crc_val = LCB_CRC_14B;
7212 else if (crc_mask & CAP_CRC_48B)
7213 crc_val = LCB_CRC_48B;
7214 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7215 crc_val = LCB_CRC_12B_16B_PER_LANE;
7216 else
7217 crc_val = LCB_CRC_16B;
7218
7219 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7220 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7221 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
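
	/*
	 * Worked example of the selection above (the capability values are
	 * hypothetical): if we enabled 14B and 48B CRC and the peer
	 * advertised 48B and per-lane, the only common bit is 48B, so
	 * crc_val becomes LCB_CRC_48B.  With no enabled modes in common,
	 * the mandatory 16B mode (LCB_CRC_16B) is used.
	 */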
7222
7223 /* set (14b only) or clear sideband credit */
7224 reg = read_csr(dd, SEND_CM_CTRL);
7225 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7226 write_csr(dd, SEND_CM_CTRL,
7227 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7228 } else {
7229 write_csr(dd, SEND_CM_CTRL,
7230 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7231 }
7232
7233 ppd->link_speed_active = 0; /* invalid value */
7234 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7235 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7236 switch (remote_tx_rate) {
7237 case 0:
7238 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7239 break;
7240 case 1:
7241 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7242 break;
7243 }
7244 } else {
7245 /* actual rate is highest bit of the ANDed rates */
7246 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7247
7248 if (rate & 2)
7249 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7250 else if (rate & 1)
7251 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7252 }
7253 if (ppd->link_speed_active == 0) {
7254 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7255 __func__, (int)remote_tx_rate);
7256 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7257 }
7258
7259 /*
7260 * Cache the values of the supported, enabled, and active
7261 * LTP CRC modes to return in 'portinfo' queries. But the bit
7262 * flags that are returned in the portinfo query differ from
7263 * what's in the link_crc_mask, crc_sizes, and crc_val
7264 * variables. Convert these here.
7265 */
7266 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7267 /* supported crc modes */
7268 ppd->port_ltp_crc_mode |=
7269 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7270 /* enabled crc modes */
7271 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7272 /* active crc mode */
7273
7274 /* set up the remote credit return table */
7275 assign_remote_cm_au_table(dd, vcu);
7276
7277 /*
7278 * The LCB is reset on entry to handle_verify_cap(), so this must
7279 * be applied on every link up.
7280 *
7281 * Adjust LCB error kill enable to kill the link if
7282 * these RBUF errors are seen:
7283 * REPLAY_BUF_MBE_SMASK
7284 * FLIT_INPUT_BUF_MBE_SMASK
7285 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007286 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007287 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7288 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7289 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7290 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7291 }
7292
7293 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7294 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7295
7296 /* give 8051 access to the LCB CSRs */
7297 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7298 set_8051_lcb_access(dd);
7299
7300 ppd->neighbor_guid =
7301 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7302 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7303 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7304 ppd->neighbor_type =
7305 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7306 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7307 ppd->neighbor_fm_security =
7308 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7309 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7310 dd_dev_info(dd,
7311 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7312 ppd->neighbor_guid, ppd->neighbor_type,
7313 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7314 if (ppd->mgmt_allowed)
7315 add_full_mgmt_pkey(ppd);
7316
7317 /* tell the 8051 to go to LinkUp */
7318 set_link_state(ppd, HLS_GOING_UP);
7319}
7320
7321/*
7322 * Apply the link width downgrade enabled policy against the current active
7323 * link widths.
7324 *
7325 * Called when the enabled policy changes or the active link widths change.
7326 */
7327void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7328{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007329 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007330 int tries;
7331 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007332 u16 tx, rx;
7333
Dean Luick323fd782015-11-16 21:59:24 -05007334 /* use the hls lock to avoid a race with actual link up */
7335 tries = 0;
7336retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007337 mutex_lock(&ppd->hls_lock);
7338 /* only apply if the link is up */
Dean Luick323fd782015-11-16 21:59:24 -05007339 if (!(ppd->host_link_state & HLS_UP)) {
7340 /* still going up..wait and retry */
7341 if (ppd->host_link_state & HLS_GOING_UP) {
7342 if (++tries < 1000) {
7343 mutex_unlock(&ppd->hls_lock);
7344 usleep_range(100, 120); /* arbitrary */
7345 goto retry;
7346 }
7347 dd_dev_err(ppd->dd,
7348 "%s: giving up waiting for link state change\n",
7349 __func__);
7350 }
7351 goto done;
7352 }
7353
7354 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007355
7356 if (refresh_widths) {
7357 get_link_widths(ppd->dd, &tx, &rx);
7358 ppd->link_width_downgrade_tx_active = tx;
7359 ppd->link_width_downgrade_rx_active = rx;
7360 }
7361
7362 if (lwde == 0) {
7363 /* downgrade is disabled */
7364
7365 /* bounce if not at starting active width */
7366 if ((ppd->link_width_active !=
7367 ppd->link_width_downgrade_tx_active)
7368 || (ppd->link_width_active !=
7369 ppd->link_width_downgrade_rx_active)) {
7370 dd_dev_err(ppd->dd,
7371 "Link downgrade is disabled and link has downgraded, downing link\n");
7372 dd_dev_err(ppd->dd,
7373 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7374 ppd->link_width_active,
7375 ppd->link_width_downgrade_tx_active,
7376 ppd->link_width_downgrade_rx_active);
7377 do_bounce = 1;
7378 }
7379 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
7380 || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7381 /* Tx or Rx is outside the enabled policy */
7382 dd_dev_err(ppd->dd,
7383 "Link is outside of downgrade allowed, downing link\n");
7384 dd_dev_err(ppd->dd,
7385 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7386 lwde,
7387 ppd->link_width_downgrade_tx_active,
7388 ppd->link_width_downgrade_rx_active);
7389 do_bounce = 1;
7390 }
7391
Dean Luick323fd782015-11-16 21:59:24 -05007392done:
7393 mutex_unlock(&ppd->hls_lock);
7394
Mike Marciniszyn77241052015-07-30 15:17:43 -04007395 if (do_bounce) {
7396 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7397 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7398 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007399 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007400 start_link(ppd);
7401 }
7402}
7403
7404/*
7405 * Handle a link downgrade interrupt from the 8051.
7406 *
7407 * This is a work-queue function outside of the interrupt.
7408 */
7409void handle_link_downgrade(struct work_struct *work)
7410{
7411 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7412 link_downgrade_work);
7413
7414 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7415 apply_link_downgrade_policy(ppd, 1);
7416}
7417
7418static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7419{
7420 return flag_string(buf, buf_len, flags, dcc_err_flags,
7421 ARRAY_SIZE(dcc_err_flags));
7422}
7423
7424static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7425{
7426 return flag_string(buf, buf_len, flags, lcb_err_flags,
7427 ARRAY_SIZE(lcb_err_flags));
7428}
7429
7430static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7431{
7432 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7433 ARRAY_SIZE(dc8051_err_flags));
7434}
7435
7436static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7437{
7438 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7439 ARRAY_SIZE(dc8051_info_err_flags));
7440}
7441
7442static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7443{
7444 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7445 ARRAY_SIZE(dc8051_info_host_msg_flags));
7446}
7447
7448static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7449{
7450 struct hfi1_pportdata *ppd = dd->pport;
7451 u64 info, err, host_msg;
7452 int queue_link_down = 0;
7453 char buf[96];
7454
7455 /* look at the flags */
7456 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7457 /* 8051 information set by firmware */
7458 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7459 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7460 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7461 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7462 host_msg = (info >>
7463 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7464 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7465
7466 /*
7467 * Handle error flags.
7468 */
7469 if (err & FAILED_LNI) {
7470 /*
7471 * LNI error indications are cleared by the 8051
7472 * only when starting polling. Only pay attention
7473 * to them when in the states that occur during
7474 * LNI.
7475 */
7476 if (ppd->host_link_state
7477 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7478 queue_link_down = 1;
7479 dd_dev_info(dd, "Link error: %s\n",
7480 dc8051_info_err_string(buf,
7481 sizeof(buf),
7482 err & FAILED_LNI));
7483 }
7484 err &= ~(u64)FAILED_LNI;
7485 }
Dean Luick6d014532015-12-01 15:38:23 -05007486		/* unknown frames can happen during LNI, just count */
7487 if (err & UNKNOWN_FRAME) {
7488 ppd->unknown_frame_count++;
7489 err &= ~(u64)UNKNOWN_FRAME;
7490 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007491 if (err) {
7492 /* report remaining errors, but do not do anything */
7493 dd_dev_err(dd, "8051 info error: %s\n",
7494 dc8051_info_err_string(buf, sizeof(buf), err));
7495 }
7496
7497 /*
7498 * Handle host message flags.
7499 */
7500 if (host_msg & HOST_REQ_DONE) {
7501 /*
7502 * Presently, the driver does a busy wait for
7503 * host requests to complete. This is only an
7504 * informational message.
7505 * NOTE: The 8051 clears the host message
7506 * information *on the next 8051 command*.
7507 * Therefore, when linkup is achieved,
7508 * this flag will still be set.
7509 */
7510 host_msg &= ~(u64)HOST_REQ_DONE;
7511 }
7512 if (host_msg & BC_SMA_MSG) {
7513 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7514 host_msg &= ~(u64)BC_SMA_MSG;
7515 }
7516 if (host_msg & LINKUP_ACHIEVED) {
7517 dd_dev_info(dd, "8051: Link up\n");
7518 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7519 host_msg &= ~(u64)LINKUP_ACHIEVED;
7520 }
7521 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharancbac3862016-02-03 14:31:31 -08007522 queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007523 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7524 }
7525 if (host_msg & VERIFY_CAP_FRAME) {
7526 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7527 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7528 }
7529 if (host_msg & LINK_GOING_DOWN) {
7530 const char *extra = "";
7531 /* no downgrade action needed if going down */
7532 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7533 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7534 extra = " (ignoring downgrade)";
7535 }
7536 dd_dev_info(dd, "8051: Link down%s\n", extra);
7537 queue_link_down = 1;
7538 host_msg &= ~(u64)LINK_GOING_DOWN;
7539 }
7540 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7541 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7542 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7543 }
7544 if (host_msg) {
7545 /* report remaining messages, but do not do anything */
7546 dd_dev_info(dd, "8051 info host message: %s\n",
7547 dc8051_info_host_msg_string(buf, sizeof(buf),
7548 host_msg));
7549 }
7550
7551 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7552 }
7553 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7554 /*
7555 * Lost the 8051 heartbeat. If this happens, we
7556 * receive constant interrupts about it. Disable
7557 * the interrupt after the first.
7558 */
7559 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7560 write_csr(dd, DC_DC8051_ERR_EN,
7561 read_csr(dd, DC_DC8051_ERR_EN)
7562 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7563
7564 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7565 }
7566 if (reg) {
7567 /* report the error, but do not do anything */
7568 dd_dev_err(dd, "8051 error: %s\n",
7569 dc8051_err_string(buf, sizeof(buf), reg));
7570 }
7571
7572 if (queue_link_down) {
7573 /* if the link is already going down or disabled, do not
7574 * queue another */
7575 if ((ppd->host_link_state
7576 & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
7577 || ppd->link_enabled == 0) {
7578 dd_dev_info(dd, "%s: not queuing link down\n",
7579 __func__);
7580 } else {
7581 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7582 }
7583 }
7584}
7585
7586static const char * const fm_config_txt[] = {
7587[0] =
7588 "BadHeadDist: Distance violation between two head flits",
7589[1] =
7590 "BadTailDist: Distance violation between two tail flits",
7591[2] =
7592 "BadCtrlDist: Distance violation between two credit control flits",
7593[3] =
7594 "BadCrdAck: Credits return for unsupported VL",
7595[4] =
7596 "UnsupportedVLMarker: Received VL Marker",
7597[5] =
7598 "BadPreempt: Exceeded the preemption nesting level",
7599[6] =
7600 "BadControlFlit: Received unsupported control flit",
7601/* no 7 */
7602[8] =
7603 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7604};
7605
7606static const char * const port_rcv_txt[] = {
7607[1] =
7608 "BadPktLen: Illegal PktLen",
7609[2] =
7610 "PktLenTooLong: Packet longer than PktLen",
7611[3] =
7612 "PktLenTooShort: Packet shorter than PktLen",
7613[4] =
7614 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7615[5] =
7616 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7617[6] =
7618 "BadL2: Illegal L2 opcode",
7619[7] =
7620 "BadSC: Unsupported SC",
7621[9] =
7622 "BadRC: Illegal RC",
7623[11] =
7624 "PreemptError: Preempting with same VL",
7625[12] =
7626 "PreemptVL15: Preempting a VL15 packet",
7627};
7628
7629#define OPA_LDR_FMCONFIG_OFFSET 16
7630#define OPA_LDR_PORTRCV_OFFSET 0
7631static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7632{
7633 u64 info, hdr0, hdr1;
7634 const char *extra;
7635 char buf[96];
7636 struct hfi1_pportdata *ppd = dd->pport;
7637 u8 lcl_reason = 0;
7638 int do_bounce = 0;
7639
7640 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7641 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7642 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7643 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7644 /* set status bit */
7645 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7646 }
7647 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7648 }
7649
7650 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7651 struct hfi1_pportdata *ppd = dd->pport;
7652 /* this counter saturates at (2^32) - 1 */
7653 if (ppd->link_downed < (u32)UINT_MAX)
7654 ppd->link_downed++;
7655 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7656 }
7657
7658 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7659 u8 reason_valid = 1;
7660
7661 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7662 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7663 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7664 /* set status bit */
7665 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7666 }
7667 switch (info) {
7668 case 0:
7669 case 1:
7670 case 2:
7671 case 3:
7672 case 4:
7673 case 5:
7674 case 6:
7675 extra = fm_config_txt[info];
7676 break;
7677 case 8:
7678 extra = fm_config_txt[info];
7679 if (ppd->port_error_action &
7680 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7681 do_bounce = 1;
7682 /*
7683 * lcl_reason cannot be derived from info
7684 * for this error
7685 */
7686 lcl_reason =
7687 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7688 }
7689 break;
7690 default:
7691 reason_valid = 0;
7692 snprintf(buf, sizeof(buf), "reserved%lld", info);
7693 extra = buf;
7694 break;
7695 }
7696
7697 if (reason_valid && !do_bounce) {
7698 do_bounce = ppd->port_error_action &
7699 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7700 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7701 }
7702
7703 /* just report this */
7704 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7705 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7706 }
7707
7708 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7709 u8 reason_valid = 1;
7710
7711 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7712 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7713 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7714 if (!(dd->err_info_rcvport.status_and_code &
7715 OPA_EI_STATUS_SMASK)) {
7716 dd->err_info_rcvport.status_and_code =
7717 info & OPA_EI_CODE_SMASK;
7718 /* set status bit */
7719 dd->err_info_rcvport.status_and_code |=
7720 OPA_EI_STATUS_SMASK;
7721 /* save first 2 flits in the packet that caused
7722 * the error */
7723 dd->err_info_rcvport.packet_flit1 = hdr0;
7724 dd->err_info_rcvport.packet_flit2 = hdr1;
7725 }
7726 switch (info) {
7727 case 1:
7728 case 2:
7729 case 3:
7730 case 4:
7731 case 5:
7732 case 6:
7733 case 7:
7734 case 9:
7735 case 11:
7736 case 12:
7737 extra = port_rcv_txt[info];
7738 break;
7739 default:
7740 reason_valid = 0;
7741 snprintf(buf, sizeof(buf), "reserved%lld", info);
7742 extra = buf;
7743 break;
7744 }
7745
7746 if (reason_valid && !do_bounce) {
7747 do_bounce = ppd->port_error_action &
7748 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7749 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7750 }
7751
7752 /* just report this */
7753 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7754 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7755 hdr0, hdr1);
7756
7757 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7758 }
7759
7760 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7761 /* informative only */
7762 dd_dev_info(dd, "8051 access to LCB blocked\n");
7763 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7764 }
7765 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7766 /* informative only */
7767 dd_dev_info(dd, "host access to LCB blocked\n");
7768 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7769 }
7770
7771 /* report any remaining errors */
7772 if (reg)
7773 dd_dev_info(dd, "DCC Error: %s\n",
7774 dcc_err_string(buf, sizeof(buf), reg));
7775
7776 if (lcl_reason == 0)
7777 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7778
7779 if (do_bounce) {
7780 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7781 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7782 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7783 }
7784}
7785
7786static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7787{
7788 char buf[96];
7789
7790 dd_dev_info(dd, "LCB Error: %s\n",
7791 lcb_err_string(buf, sizeof(buf), reg));
7792}
7793
7794/*
7795 * CCE block DC interrupt. Source is < 8.
7796 */
7797static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7798{
7799 const struct err_reg_info *eri = &dc_errs[source];
7800
7801 if (eri->handler) {
7802 interrupt_clear_down(dd, 0, eri);
7803 } else if (source == 3 /* dc_lbm_int */) {
7804 /*
7805 * This indicates that a parity error has occurred on the
7806 * address/control lines presented to the LBM. The error
7807 * is a single pulse, there is no associated error flag,
7808 * and it is non-maskable. This is because if a parity
7809 * error occurs on the request the request is dropped.
7810 * This should never occur, but it is nice to know if it
7811 * ever does.
7812 */
7813 dd_dev_err(dd, "Parity error in DC LBM block\n");
7814 } else {
7815 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7816 }
7817}
7818
7819/*
7820 * TX block send credit interrupt. Source is < 160.
7821 */
7822static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7823{
7824 sc_group_release_update(dd, source);
7825}
7826
7827/*
7828 * TX block SDMA interrupt. Source is < 48.
7829 *
7830 * SDMA interrupts are grouped by type:
7831 *
7832 * 0 - N-1 = SDma
7833 * N - 2N-1 = SDmaProgress
7834 * 2N - 3N-1 = SDmaIdle
7835 */
7836static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7837{
7838 /* what interrupt */
7839 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7840 /* which engine */
7841 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7842
7843#ifdef CONFIG_SDMA_VERBOSITY
7844 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7845 slashstrip(__FILE__), __LINE__, __func__);
7846 sdma_dumpstate(&dd->per_sdma[which]);
7847#endif
7848
7849 if (likely(what < 3 && which < dd->num_sdma)) {
7850 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7851 } else {
7852 /* should not happen */
7853 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7854 }
7855}
7856
7857/*
7858 * RX block receive available interrupt. Source is < 160.
7859 */
7860static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7861{
7862 struct hfi1_ctxtdata *rcd;
7863 char *err_detail;
7864
7865 if (likely(source < dd->num_rcv_contexts)) {
7866 rcd = dd->rcd[source];
7867 if (rcd) {
7868 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007869 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007870 else
7871 handle_user_interrupt(rcd);
7872 return; /* OK */
7873 }
7874 /* received an interrupt, but no rcd */
7875 err_detail = "dataless";
7876 } else {
7877 /* received an interrupt, but are not using that context */
7878 err_detail = "out of range";
7879 }
7880 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7881 err_detail, source);
7882}
7883
7884/*
7885 * RX block receive urgent interrupt. Source is < 160.
7886 */
7887static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7888{
7889 struct hfi1_ctxtdata *rcd;
7890 char *err_detail;
7891
7892 if (likely(source < dd->num_rcv_contexts)) {
7893 rcd = dd->rcd[source];
7894 if (rcd) {
7895 /* only pay attention to user urgent interrupts */
7896 if (source >= dd->first_user_ctxt)
7897 handle_user_interrupt(rcd);
7898 return; /* OK */
7899 }
7900 /* received an interrupt, but no rcd */
7901 err_detail = "dataless";
7902 } else {
7903 /* received an interrupt, but are not using that context */
7904 err_detail = "out of range";
7905 }
7906 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7907 err_detail, source);
7908}
7909
7910/*
7911 * Reserved range interrupt. Should not be called in normal operation.
7912 */
7913static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7914{
7915 char name[64];
7916
7917 dd_dev_err(dd, "unexpected %s interrupt\n",
7918 is_reserved_name(name, sizeof(name), source));
7919}
7920
7921static const struct is_table is_table[] = {
7922/* start end
7923 name func interrupt func */
7924{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7925 is_misc_err_name, is_misc_err_int },
7926{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7927 is_sdma_eng_err_name, is_sdma_eng_err_int },
7928{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7929 is_sendctxt_err_name, is_sendctxt_err_int },
7930{ IS_SDMA_START, IS_SDMA_END,
7931 is_sdma_eng_name, is_sdma_eng_int },
7932{ IS_VARIOUS_START, IS_VARIOUS_END,
7933 is_various_name, is_various_int },
7934{ IS_DC_START, IS_DC_END,
7935 is_dc_name, is_dc_int },
7936{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7937 is_rcv_avail_name, is_rcv_avail_int },
7938{ IS_RCVURGENT_START, IS_RCVURGENT_END,
7939 is_rcv_urgent_name, is_rcv_urgent_int },
7940{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7941 is_send_credit_name, is_send_credit_int},
7942{ IS_RESERVED_START, IS_RESERVED_END,
7943 is_reserved_name, is_reserved_int},
7944};
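/*
 * The table above must stay sorted by range; is_interrupt() below relies
 * on that ordering to locate the handler with a single compare per entry.
 */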
7945
7946/*
7947 * Interrupt source interrupt - called when the given source has an interrupt.
7948 * Source is a bit index into an array of 64-bit integers.
7949 */
7950static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7951{
7952 const struct is_table *entry;
7953
7954 /* avoids a double compare by walking the table in-order */
7955 for (entry = &is_table[0]; entry->is_name; entry++) {
7956 if (source < entry->end) {
7957 trace_hfi1_interrupt(dd, entry, source);
7958 entry->is_int(dd, source - entry->start);
7959 return;
7960 }
7961 }
7962 /* fell off the end */
7963 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7964}
7965
7966/*
7967 * General interrupt handler. This is able to correctly handle
7968 * all interrupts in case INTx is used.
7969 */
7970static irqreturn_t general_interrupt(int irq, void *data)
7971{
7972 struct hfi1_devdata *dd = data;
7973 u64 regs[CCE_NUM_INT_CSRS];
7974 u32 bit;
7975 int i;
7976
7977 this_cpu_inc(*dd->int_counter);
7978
7979 /* phase 1: scan and clear all handled interrupts */
7980 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7981 if (dd->gi_mask[i] == 0) {
7982 regs[i] = 0; /* used later */
7983 continue;
7984 }
7985 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7986 dd->gi_mask[i];
7987 /* only clear if anything is set */
7988 if (regs[i])
7989 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7990 }
7991
7992 /* phase 2: call the appropriate handler */
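	/*
	 * regs[] is scanned as a single bitmap, so each set bit index is
	 * exactly the global interrupt source number that is_interrupt()
	 * expects.
	 */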
7993 for_each_set_bit(bit, (unsigned long *)&regs[0],
7994 CCE_NUM_INT_CSRS*64) {
7995 is_interrupt(dd, bit);
7996 }
7997
7998 return IRQ_HANDLED;
7999}
8000
8001static irqreturn_t sdma_interrupt(int irq, void *data)
8002{
8003 struct sdma_engine *sde = data;
8004 struct hfi1_devdata *dd = sde->dd;
8005 u64 status;
8006
8007#ifdef CONFIG_SDMA_VERBOSITY
8008 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8009 slashstrip(__FILE__), __LINE__, __func__);
8010 sdma_dumpstate(sde);
8011#endif
8012
8013 this_cpu_inc(*dd->int_counter);
8014
8015 /* This read_csr is really bad in the hot path */
8016 status = read_csr(dd,
8017 CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
8018 & sde->imask;
8019 if (likely(status)) {
8020 /* clear the interrupt(s) */
8021 write_csr(dd,
8022 CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
8023 status);
8024
8025 /* handle the interrupt(s) */
8026 sdma_engine_interrupt(sde, status);
8027 } else
8028 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8029 sde->this_idx);
8030
8031 return IRQ_HANDLED;
8032}
8033
8034/*
Dean Luickecd42f82016-02-03 14:35:14 -08008035 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8036 * to ensure that the write completed. This does NOT guarantee that
8037 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008038 */
8039static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8040{
8041 struct hfi1_devdata *dd = rcd->dd;
8042 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8043
8044 mmiowb(); /* make sure everything before is written */
8045 write_csr(dd, addr, rcd->imask);
8046 /* force the above write on the chip and get a value back */
8047 (void)read_csr(dd, addr);
8048}
8049
8050/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008051void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008052{
8053 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8054}
8055
Dean Luickecd42f82016-02-03 14:35:14 -08008056/*
8057 * Return non-zero if a packet is present.
8058 *
8059 * This routine is called when rechecking for packets after the RcvAvail
8060 * interrupt has been cleared down. First, do a quick check of memory for
8061 * a packet present. If not found, use an expensive CSR read of the context
8062 * tail to determine the actual tail. The CSR read is necessary because there
8063 * is no method to push pending DMAs to memory other than an interrupt and we
8064 * are trying to determine if we need to force an interrupt.
8065 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008066static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8067{
Dean Luickecd42f82016-02-03 14:35:14 -08008068 u32 tail;
8069 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008070
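	/*
	 * Without DMA_RTAIL, a packet is present when the sequence number
	 * in the next RHF matches the expected rcd->seq_cnt; with
	 * DMA_RTAIL, compare the software head against the reported tail.
	 */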
Dean Luickecd42f82016-02-03 14:35:14 -08008071 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8072 present = (rcd->seq_cnt ==
8073 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8074 else /* is RDMA rtail */
8075 present = (rcd->head != get_rcvhdrtail(rcd));
8076
8077 if (present)
8078 return 1;
8079
8080	/* fall back to a CSR read, correct regardless of DMA_RTAIL */
8081 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8082 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008083}
8084
8085/*
8086 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8087 * This routine will try to handle packets immediately (latency), but if
8088 * it finds too many, it will invoke the thread handler (bandwidth). The
8089 * chip receive interrupt is *not* cleared down until this or the thread (if
8090 * invoked) is finished. The intent is to avoid extra interrupts while we
8091 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008092 */
8093static irqreturn_t receive_context_interrupt(int irq, void *data)
8094{
8095 struct hfi1_ctxtdata *rcd = data;
8096 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008097 int disposition;
8098 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008099
8100 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8101 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008102 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008103
Dean Luickf4f30031c2015-10-26 10:28:44 -04008104 /* receive interrupt remains blocked while processing packets */
8105 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008106
Dean Luickf4f30031c2015-10-26 10:28:44 -04008107 /*
8108 * Too many packets were seen while processing packets in this
8109 * IRQ handler. Invoke the handler thread. The receive interrupt
8110 * remains blocked.
8111 */
8112 if (disposition == RCV_PKT_LIMIT)
8113 return IRQ_WAKE_THREAD;
8114
8115 /*
8116 * The packet processor detected no more packets. Clear the receive
8117	 * interrupt and recheck for a packet that may have arrived
8118 * after the previous check and interrupt clear. If a packet arrived,
8119 * force another interrupt.
8120 */
8121 clear_recv_intr(rcd);
8122 present = check_packet_present(rcd);
8123 if (present)
8124 force_recv_intr(rcd);
8125
8126 return IRQ_HANDLED;
8127}
8128
8129/*
8130 * Receive packet thread handler. This expects to be invoked with the
8131 * receive interrupt still blocked.
8132 */
8133static irqreturn_t receive_context_thread(int irq, void *data)
8134{
8135 struct hfi1_ctxtdata *rcd = data;
8136 int present;
8137
8138 /* receive interrupt is still blocked from the IRQ handler */
8139 (void)rcd->do_interrupt(rcd, 1);
8140
8141 /*
8142 * The packet processor will only return if it detected no more
8143 * packets. Hold IRQs here so we can safely clear the interrupt and
8144 * recheck for a packet that may have arrived after the previous
8145 * check and the interrupt clear. If a packet arrived, force another
8146 * interrupt.
8147 */
8148 local_irq_disable();
8149 clear_recv_intr(rcd);
8150 present = check_packet_present(rcd);
8151 if (present)
8152 force_recv_intr(rcd);
8153 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008154
8155 return IRQ_HANDLED;
8156}
8157
8158/* ========================================================================= */
8159
8160u32 read_physical_state(struct hfi1_devdata *dd)
8161{
8162 u64 reg;
8163
8164 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8165 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8166 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8167}
8168
Jim Snowfb9036d2016-01-11 18:32:21 -05008169u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008170{
8171 u64 reg;
8172
8173 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8174 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8175 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8176}
8177
8178static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8179{
8180 u64 reg;
8181
8182 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8183 /* clear current state, set new state */
8184 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8185 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8186 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8187}
8188
8189/*
8190 * Use the 8051 to read a LCB CSR.
8191 */
8192static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8193{
8194 u32 regno;
8195 int ret;
8196
8197 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8198 if (acquire_lcb_access(dd, 0) == 0) {
8199 *data = read_csr(dd, addr);
8200 release_lcb_access(dd, 0);
8201 return 0;
8202 }
8203 return -EBUSY;
8204 }
8205
8206 /* register is an index of LCB registers: (offset - base) / 8 */
8207 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8208 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8209 if (ret != HCMD_SUCCESS)
8210 return -EBUSY;
8211 return 0;
8212}
8213
8214/*
8215 * Read an LCB CSR. Access may not be in host control, so check.
8216 * Return 0 on success, -EBUSY on failure.
8217 */
8218int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8219{
8220 struct hfi1_pportdata *ppd = dd->pport;
8221
8222 /* if up, go through the 8051 for the value */
8223 if (ppd->host_link_state & HLS_UP)
8224 return read_lcb_via_8051(dd, addr, data);
8225 /* if going up or down, no access */
8226 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8227 return -EBUSY;
8228 /* otherwise, host has access */
8229 *data = read_csr(dd, addr);
8230 return 0;
8231}
8232
8233/*
8234 * Use the 8051 to write a LCB CSR.
8235 */
8236static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8237{
Dean Luick3bf40d62015-11-06 20:07:04 -05008238 u32 regno;
8239 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008240
Dean Luick3bf40d62015-11-06 20:07:04 -05008241 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8242 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8243 if (acquire_lcb_access(dd, 0) == 0) {
8244 write_csr(dd, addr, data);
8245 release_lcb_access(dd, 0);
8246 return 0;
8247 }
8248 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008249 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008250
8251 /* register is an index of LCB registers: (offset - base) / 8 */
8252 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8253 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8254 if (ret != HCMD_SUCCESS)
8255 return -EBUSY;
8256 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008257}
8258
8259/*
8260 * Write an LCB CSR. Access may not be in host control, so check.
8261 * Return 0 on success, -EBUSY on failure.
8262 */
8263int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8264{
8265 struct hfi1_pportdata *ppd = dd->pport;
8266
8267 /* if up, go through the 8051 for the value */
8268 if (ppd->host_link_state & HLS_UP)
8269 return write_lcb_via_8051(dd, addr, data);
8270 /* if going up or down, no access */
8271 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8272 return -EBUSY;
8273 /* otherwise, host has access */
8274 write_csr(dd, addr, data);
8275 return 0;
8276}
8277
8278/*
8279 * Returns:
8280 * < 0 = Linux error, not able to get access
8281 * > 0 = 8051 command RETURN_CODE
8282 */
8283static int do_8051_command(
8284 struct hfi1_devdata *dd,
8285 u32 type,
8286 u64 in_data,
8287 u64 *out_data)
8288{
8289 u64 reg, completed;
8290 int return_code;
8291 unsigned long flags;
8292 unsigned long timeout;
8293
8294 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8295
8296 /*
8297 * Alternative to holding the lock for a long time:
8298 * - keep busy wait - have other users bounce off
8299 */
8300 spin_lock_irqsave(&dd->dc8051_lock, flags);
8301
8302 /* We can't send any commands to the 8051 if it's in reset */
8303 if (dd->dc_shutdown) {
8304 return_code = -ENODEV;
8305 goto fail;
8306 }
8307
8308 /*
8309 * If an 8051 host command timed out previously, then the 8051 is
8310 * stuck.
8311 *
8312 * On first timeout, attempt to reset and restart the entire DC
8313 * block (including 8051). (Is this too big of a hammer?)
8314 *
8315 * If the 8051 times out a second time, the reset did not bring it
8316 * back to healthy life. In that case, fail any subsequent commands.
8317 */
8318 if (dd->dc8051_timed_out) {
8319 if (dd->dc8051_timed_out > 1) {
8320 dd_dev_err(dd,
8321 "Previous 8051 host command timed out, skipping command %u\n",
8322 type);
8323 return_code = -ENXIO;
8324 goto fail;
8325 }
8326 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8327 dc_shutdown(dd);
8328 dc_start(dd);
8329 spin_lock_irqsave(&dd->dc8051_lock, flags);
8330 }
8331
8332 /*
8333 * If there is no timeout, then the 8051 command interface is
8334 * waiting for a command.
8335 */
8336
8337 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008338	 * When writing an LCB CSR, out_data contains the full value to
8339	 * be written, while in_data contains the relative LCB
8340	 * address in 7:0.  Do the work here, rather than the caller, of
8341	 * distributing the write data to where it needs to go:
8342 *
8343 * Write data
8344 * 39:00 -> in_data[47:8]
8345 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8346 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8347 */
8348 if (type == HCMD_WRITE_LCB_CSR) {
8349 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8350 reg = ((((*out_data) >> 40) & 0xff) <<
8351 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8352 | ((((*out_data) >> 48) & 0xffff) <<
8353 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8354 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8355 }
8356
8357 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008358 * Do two writes: the first to stabilize the type and req_data, the
8359 * second to activate.
8360 */
8361 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8362 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8363 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8364 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8365 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8366 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8367 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8368
8369 /* wait for completion, alternate: interrupt */
8370 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8371 while (1) {
8372 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8373 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8374 if (completed)
8375 break;
8376 if (time_after(jiffies, timeout)) {
8377 dd->dc8051_timed_out++;
8378 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8379 if (out_data)
8380 *out_data = 0;
8381 return_code = -ETIMEDOUT;
8382 goto fail;
8383 }
8384 udelay(2);
8385 }
8386
8387 if (out_data) {
8388 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8389 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8390 if (type == HCMD_READ_LCB_CSR) {
8391 /* top 16 bits are in a different register */
8392 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8393 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8394 << (48
8395 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8396 }
8397 }
8398 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8399 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8400 dd->dc8051_timed_out = 0;
8401 /*
8402 * Clear command for next user.
8403 */
8404 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8405
8406fail:
8407 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8408
8409 return return_code;
8410}
8411
8412static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8413{
8414 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8415}
8416
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008417int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8418 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008419{
8420 u64 data;
8421 int ret;
8422
8423 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8424 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8425 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8426 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8427 if (ret != HCMD_SUCCESS) {
8428 dd_dev_err(dd,
8429 "load 8051 config: field id %d, lane %d, err %d\n",
8430 (int)field_id, (int)lane_id, ret);
8431 }
8432 return ret;
8433}
8434
8435/*
8436 * Read the 8051 firmware "registers". Use the RAM directly. Always
8437 * set the result, even on error.
8438 * Return 0 on success, -errno on failure
8439 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008440int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8441 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008442{
8443 u64 big_data;
8444 u32 addr;
8445 int ret;
8446
8447 /* address start depends on the lane_id */
8448 if (lane_id < 4)
8449 addr = (4 * NUM_GENERAL_FIELDS)
8450 + (lane_id * 4 * NUM_LANE_FIELDS);
8451 else
8452 addr = 0;
8453 addr += field_id * 4;
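	/*
	 * Implied layout: NUM_GENERAL_FIELDS 4-byte general fields at the
	 * start of the RAM (selected by a lane_id >= 4, e.g. GENERAL_CONFIG),
	 * followed by four per-lane blocks of NUM_LANE_FIELDS 4-byte fields.
	 */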
8454
8455 /* read is in 8-byte chunks, hardware will truncate the address down */
8456 ret = read_8051_data(dd, addr, 8, &big_data);
8457
8458 if (ret == 0) {
8459 /* extract the 4 bytes we want */
8460 if (addr & 0x4)
8461 *result = (u32)(big_data >> 32);
8462 else
8463 *result = (u32)big_data;
8464 } else {
8465 *result = 0;
8466 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8467 __func__, lane_id, field_id);
8468 }
8469
8470 return ret;
8471}
8472
8473static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8474 u8 continuous)
8475{
8476 u32 frame;
8477
8478 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8479 | power_management << POWER_MANAGEMENT_SHIFT;
8480 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8481 GENERAL_CONFIG, frame);
8482}
8483
8484static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8485 u16 vl15buf, u8 crc_sizes)
8486{
8487 u32 frame;
8488
8489 frame = (u32)vau << VAU_SHIFT
8490 | (u32)z << Z_SHIFT
8491 | (u32)vcu << VCU_SHIFT
8492 | (u32)vl15buf << VL15BUF_SHIFT
8493 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8494 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8495 GENERAL_CONFIG, frame);
8496}
8497
8498static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8499 u8 *flag_bits, u16 *link_widths)
8500{
8501 u32 frame;
8502
8503 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8504 &frame);
8505 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8506 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8507 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8508}
8509
8510static int write_vc_local_link_width(struct hfi1_devdata *dd,
8511 u8 misc_bits,
8512 u8 flag_bits,
8513 u16 link_widths)
8514{
8515 u32 frame;
8516
8517 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8518 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8519 | (u32)link_widths << LINK_WIDTH_SHIFT;
8520 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8521 frame);
8522}
8523
8524static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8525 u8 device_rev)
8526{
8527 u32 frame;
8528
8529 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8530 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8531 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8532}
8533
8534static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8535 u8 *device_rev)
8536{
8537 u32 frame;
8538
8539 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8540 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8541 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8542 & REMOTE_DEVICE_REV_MASK;
8543}
8544
8545void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8546{
8547 u32 frame;
8548
8549 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8550 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8551 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8552}
8553
8554static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8555 u8 *continuous)
8556{
8557 u32 frame;
8558
8559 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8560 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8561 & POWER_MANAGEMENT_MASK;
8562 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8563 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8564}
8565
8566static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8567 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8568{
8569 u32 frame;
8570
8571 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8572 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8573 *z = (frame >> Z_SHIFT) & Z_MASK;
8574 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8575 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8576 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8577}
8578
8579static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8580 u8 *remote_tx_rate,
8581 u16 *link_widths)
8582{
8583 u32 frame;
8584
8585 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8586 &frame);
8587 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8588 & REMOTE_TX_RATE_MASK;
8589 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8590}
8591
8592static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8593{
8594 u32 frame;
8595
8596 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8597 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8598}
8599
8600static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8601{
8602 u32 frame;
8603
8604 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8605 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8606}
8607
8608static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8609{
8610 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8611}
8612
8613static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8614{
8615 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8616}
8617
8618void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8619{
8620 u32 frame;
8621 int ret;
8622
8623 *link_quality = 0;
8624 if (dd->pport->host_link_state & HLS_UP) {
8625 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8626 &frame);
8627 if (ret == 0)
8628 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8629 & LINK_QUALITY_MASK;
8630 }
8631}
8632
8633static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8634{
8635 u32 frame;
8636
8637 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8638 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8639}
8640
8641static int read_tx_settings(struct hfi1_devdata *dd,
8642 u8 *enable_lane_tx,
8643 u8 *tx_polarity_inversion,
8644 u8 *rx_polarity_inversion,
8645 u8 *max_rate)
8646{
8647 u32 frame;
8648 int ret;
8649
8650 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8651 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8652 & ENABLE_LANE_TX_MASK;
8653 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8654 & TX_POLARITY_INVERSION_MASK;
8655 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8656 & RX_POLARITY_INVERSION_MASK;
8657 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8658 return ret;
8659}
8660
8661static int write_tx_settings(struct hfi1_devdata *dd,
8662 u8 enable_lane_tx,
8663 u8 tx_polarity_inversion,
8664 u8 rx_polarity_inversion,
8665 u8 max_rate)
8666{
8667 u32 frame;
8668
8669 /* no need to mask, all variable sizes match field widths */
8670 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8671 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8672 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8673 | max_rate << MAX_RATE_SHIFT;
8674 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8675}
8676
8677static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8678{
8679 u32 frame, version, prod_id;
8680 int ret, lane;
8681
8682 /* 4 lanes */
8683 for (lane = 0; lane < 4; lane++) {
8684 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8685 if (ret) {
8686 dd_dev_err(
8687 dd,
8688 "Unable to read lane %d firmware details\n",
8689 lane);
8690 continue;
8691 }
8692 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8693 & SPICO_ROM_VERSION_MASK;
8694 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8695 & SPICO_ROM_PROD_ID_MASK;
8696 dd_dev_info(dd,
8697 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8698 lane, version, prod_id);
8699 }
8700}
8701
8702/*
8703 * Read an idle LCB message.
8704 *
8705 * Returns 0 on success, -EINVAL on error
8706 */
8707static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8708{
8709 int ret;
8710
8711 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8712 type, data_out);
8713 if (ret != HCMD_SUCCESS) {
8714 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8715 (u32)type, ret);
8716 return -EINVAL;
8717 }
8718 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8719 /* return only the payload as we already know the type */
8720 *data_out >>= IDLE_PAYLOAD_SHIFT;
8721 return 0;
8722}
8723
8724/*
8725 * Read an idle SMA message. To be done in response to a notification from
8726 * the 8051.
8727 *
8728 * Returns 0 on success, -EINVAL on error
8729 */
8730static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8731{
8732 return read_idle_message(dd,
8733 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8734}
8735
8736/*
8737 * Send an idle LCB message.
8738 *
8739 * Returns 0 on success, -EINVAL on error
8740 */
8741static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8742{
8743 int ret;
8744
8745 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8746 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8747 if (ret != HCMD_SUCCESS) {
8748 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8749 data, ret);
8750 return -EINVAL;
8751 }
8752 return 0;
8753}
8754
8755/*
8756 * Send an idle SMA message.
8757 *
8758 * Returns 0 on success, -EINVAL on error
8759 */
8760int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8761{
8762 u64 data;
8763
8764 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8765 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8766 return send_idle_message(dd, data);
8767}
8768
8769/*
8770 * Initialize the LCB then do a quick link up. This may or may not be
8771 * in loopback.
8772 *
8773 * return 0 on success, -errno on error
8774 */
8775static int do_quick_linkup(struct hfi1_devdata *dd)
8776{
8777 u64 reg;
8778 unsigned long timeout;
8779 int ret;
8780
8781 lcb_shutdown(dd, 0);
8782
8783 if (loopback) {
8784 /* LCB_CFG_LOOPBACK.VAL = 2 */
8785 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8786 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8787 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8788 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8789 }
8790
8791 /* start the LCBs */
8792 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8793 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8794
8795 /* simulator only loopback steps */
8796 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8797 /* LCB_CFG_RUN.EN = 1 */
8798 write_csr(dd, DC_LCB_CFG_RUN,
8799 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8800
8801 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8802 timeout = jiffies + msecs_to_jiffies(10);
8803 while (1) {
8804 reg = read_csr(dd,
8805 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8806 if (reg)
8807 break;
8808 if (time_after(jiffies, timeout)) {
8809 dd_dev_err(dd,
8810 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8811 return -ETIMEDOUT;
8812 }
8813 udelay(2);
8814 }
8815
8816 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8817 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8818 }
8819
8820 if (!loopback) {
8821 /*
8822 * When doing quick linkup and not in loopback, both
8823 * sides must be done with LCB set-up before either
8824 * starts the quick linkup. Put a delay here so that
8825 * both sides can be started and have a chance to be
8826 * done with LCB set up before resuming.
8827 */
8828 dd_dev_err(dd,
8829 "Pausing for peer to be finished with LCB set up\n");
8830 msleep(5000);
8831 dd_dev_err(dd,
8832 "Continuing with quick linkup\n");
8833 }
8834
8835 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8836 set_8051_lcb_access(dd);
8837
8838 /*
8839 * State "quick" LinkUp request sets the physical link state to
8840 * LinkUp without a verify capability sequence.
8841 * This state is in simulator v37 and later.
8842 */
8843 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8844 if (ret != HCMD_SUCCESS) {
8845 dd_dev_err(dd,
8846 "%s: set physical link state to quick LinkUp failed with return %d\n",
8847 __func__, ret);
8848
8849 set_host_lcb_access(dd);
8850 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8851
8852 if (ret >= 0)
8853 ret = -EINVAL;
8854 return ret;
8855 }
8856
8857 return 0; /* success */
8858}
8859
8860/*
8861 * Set the SerDes to internal loopback mode.
8862 * Returns 0 on success, -errno on error.
8863 */
8864static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8865{
8866 int ret;
8867
8868 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8869 if (ret == HCMD_SUCCESS)
8870 return 0;
8871 dd_dev_err(dd,
8872 "Set physical link state to SerDes Loopback failed with return %d\n",
8873 ret);
8874 if (ret >= 0)
8875 ret = -EINVAL;
8876 return ret;
8877}
8878
8879/*
8880 * Do all special steps to set up loopback.
8881 */
8882static int init_loopback(struct hfi1_devdata *dd)
8883{
8884 dd_dev_info(dd, "Entering loopback mode\n");
8885
8886 /* all loopbacks should disable self GUID check */
8887 write_csr(dd, DC_DC8051_CFG_MODE,
8888 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8889
8890 /*
8891 * The simulator has only one loopback option - LCB. Switch
8892 * to that option, which includes quick link up.
8893 *
8894 * Accept all valid loopback values.
8895 */
8896 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8897 && (loopback == LOOPBACK_SERDES
8898 || loopback == LOOPBACK_LCB
8899 || loopback == LOOPBACK_CABLE)) {
8900 loopback = LOOPBACK_LCB;
8901 quick_linkup = 1;
8902 return 0;
8903 }
8904
8905 /* handle serdes loopback */
8906 if (loopback == LOOPBACK_SERDES) {
8907		/* internal serdes loopback needs quick linkup on RTL */
8908 if (dd->icode == ICODE_RTL_SILICON)
8909 quick_linkup = 1;
8910 return set_serdes_loopback_mode(dd);
8911 }
8912
8913 /* LCB loopback - handled at poll time */
8914 if (loopback == LOOPBACK_LCB) {
8915 quick_linkup = 1; /* LCB is always quick linkup */
8916
8917 /* not supported in emulation due to emulation RTL changes */
8918 if (dd->icode == ICODE_FPGA_EMULATION) {
8919 dd_dev_err(dd,
8920 "LCB loopback not supported in emulation\n");
8921 return -EINVAL;
8922 }
8923 return 0;
8924 }
8925
8926 /* external cable loopback requires no extra steps */
8927 if (loopback == LOOPBACK_CABLE)
8928 return 0;
8929
8930 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8931 return -EINVAL;
8932}
8933
8934/*
8935 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8936 * used in the Verify Capability link width attribute.
8937 */
8938static u16 opa_to_vc_link_widths(u16 opa_widths)
8939{
8940 int i;
8941 u16 result = 0;
8942
8943 static const struct link_bits {
8944 u16 from;
8945 u16 to;
8946 } opa_link_xlate[] = {
8947 { OPA_LINK_WIDTH_1X, 1 << (1-1) },
8948 { OPA_LINK_WIDTH_2X, 1 << (2-1) },
8949 { OPA_LINK_WIDTH_3X, 1 << (3-1) },
8950 { OPA_LINK_WIDTH_4X, 1 << (4-1) },
8951 };
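	/*
	 * Each OPA width flag maps to bit (width - 1) of the VC link width
	 * field, e.g. OPA_LINK_WIDTH_4X becomes 1 << 3 = 0x8.
	 */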
8952
8953 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8954 if (opa_widths & opa_link_xlate[i].from)
8955 result |= opa_link_xlate[i].to;
8956 }
8957 return result;
8958}
8959
8960/*
8961 * Set link attributes before moving to polling.
8962 */
8963static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8964{
8965 struct hfi1_devdata *dd = ppd->dd;
8966 u8 enable_lane_tx;
8967 u8 tx_polarity_inversion;
8968 u8 rx_polarity_inversion;
8969 int ret;
8970
8971 /* reset our fabric serdes to clear any lingering problems */
8972 fabric_serdes_reset(dd);
8973
8974 /* set the local tx rate - need to read-modify-write */
8975 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8976 &rx_polarity_inversion, &ppd->local_tx_rate);
8977 if (ret)
8978 goto set_local_link_attributes_fail;
8979
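	/*
	 * Firmware older than 0.20 only carries a single rate selection
	 * (1 = 25G, 0 = 12.5G), so pick the fastest enabled speed; newer
	 * firmware treats local_tx_rate as a bitmask with bit 1 = 25G and
	 * bit 0 = 12.5G.
	 */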
8980 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8981 /* set the tx rate to the fastest enabled */
8982 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8983 ppd->local_tx_rate = 1;
8984 else
8985 ppd->local_tx_rate = 0;
8986 } else {
8987 /* set the tx rate to all enabled */
8988 ppd->local_tx_rate = 0;
8989 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8990 ppd->local_tx_rate |= 2;
8991 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8992 ppd->local_tx_rate |= 1;
8993 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04008994
8995 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008996 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8997 rx_polarity_inversion, ppd->local_tx_rate);
8998 if (ret != HCMD_SUCCESS)
8999 goto set_local_link_attributes_fail;
9000
9001 /*
9002 * DC supports continuous updates.
9003 */
9004 ret = write_vc_local_phy(dd, 0 /* no power management */,
9005 1 /* continuous updates */);
9006 if (ret != HCMD_SUCCESS)
9007 goto set_local_link_attributes_fail;
9008
9009 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9010 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9011 ppd->port_crc_mode_enabled);
9012 if (ret != HCMD_SUCCESS)
9013 goto set_local_link_attributes_fail;
9014
9015 ret = write_vc_local_link_width(dd, 0, 0,
9016 opa_to_vc_link_widths(ppd->link_width_enabled));
9017 if (ret != HCMD_SUCCESS)
9018 goto set_local_link_attributes_fail;
9019
9020 /* let peer know who we are */
9021 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9022 if (ret == HCMD_SUCCESS)
9023 return 0;
9024
9025set_local_link_attributes_fail:
9026 dd_dev_err(dd,
9027 "Failed to set local link attributes, return 0x%x\n",
9028 ret);
9029 return ret;
9030}
9031
9032/*
9033 * Call this to start the link. Schedule a retry if the cable is not
9034 * present or if unable to start polling. Do not do anything if the
9035 * link is disabled. Returns 0 if the link is disabled or moved to polling.
9036 */
9037int start_link(struct hfi1_pportdata *ppd)
9038{
9039 if (!ppd->link_enabled) {
9040 dd_dev_info(ppd->dd,
9041 "%s: stopping link start because link is disabled\n",
9042 __func__);
9043 return 0;
9044 }
9045 if (!ppd->driver_link_ready) {
9046 dd_dev_info(ppd->dd,
9047 "%s: stopping link start because driver is not ready\n",
9048 __func__);
9049 return 0;
9050 }
9051
9052 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
9053 loopback == LOOPBACK_LCB ||
9054 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9055 return set_link_state(ppd, HLS_DN_POLL);
9056
9057 dd_dev_info(ppd->dd,
9058 "%s: stopping link start because no cable is present\n",
9059 __func__);
9060 return -EAGAIN;
9061}
9062
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009063static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9064{
9065 struct hfi1_devdata *dd = ppd->dd;
9066 u64 mask;
9067 unsigned long timeout;
9068
9069 /*
9070 * Check for QSFP interrupt for t_init (SFF 8679)
9071 */
9072 timeout = jiffies + msecs_to_jiffies(2000);
9073 while (1) {
9074 mask = read_csr(dd, dd->hfi1_id ?
9075 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9076 if (!(mask & QSFP_HFI0_INT_N)) {
9077 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9078 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9079 break;
9080 }
9081 if (time_after(jiffies, timeout)) {
9082 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9083 __func__);
9084 break;
9085 }
9086 udelay(2);
9087 }
9088}
9089
9090static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9091{
9092 struct hfi1_devdata *dd = ppd->dd;
9093 u64 mask;
9094
9095 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9096 if (enable)
9097 mask |= (u64)QSFP_HFI0_INT_N;
9098 else
9099 mask &= ~(u64)QSFP_HFI0_INT_N;
9100 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9101}
9102
9103void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009104{
9105 struct hfi1_devdata *dd = ppd->dd;
9106 u64 mask, qsfp_mask;
9107
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009108 /* Disable INT_N from triggering QSFP interrupts */
9109 set_qsfp_int_n(ppd, 0);
9110
9111 /* Reset the QSFP */
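	/*
	 * RESET_N is active low: enable the pin as an output, drive it low
	 * to assert reset, wait, then drive it high again to release the
	 * module from reset.
	 */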
Mike Marciniszyn77241052015-07-30 15:17:43 -04009112 mask = (u64)QSFP_HFI0_RESET_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009113 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009114 qsfp_mask |= mask;
9115 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009116 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009117
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009118 qsfp_mask = read_csr(dd, dd->hfi1_id ?
9119 ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009120 qsfp_mask &= ~mask;
9121 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009122 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009123
9124 udelay(10);
9125
9126 qsfp_mask |= mask;
9127 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009128 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9129
9130 wait_for_qsfp_init(ppd);
9131
9132 /*
9133 * Allow INT_N to trigger the QSFP interrupt to watch
9134 * for alarms and warnings
9135 */
9136 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009137}
9138
9139static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9140 u8 *qsfp_interrupt_status)
9141{
9142 struct hfi1_devdata *dd = ppd->dd;
9143
9144 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9145 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9146 dd_dev_info(dd,
9147			"%s: QSFP cable temperature too high\n",
9148 __func__);
9149
9150 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9151 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9152 dd_dev_info(dd,
9153 "%s: QSFP cable temperature too low\n",
9154 __func__);
9155
9156 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9157 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9158 dd_dev_info(dd,
9159 "%s: QSFP supply voltage too high\n",
9160 __func__);
9161
9162 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9163 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9164 dd_dev_info(dd,
9165 "%s: QSFP supply voltage too low\n",
9166 __func__);
9167
9168 /* Byte 2 is vendor specific */
9169
9170 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9171 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9172 dd_dev_info(dd,
9173 "%s: Cable RX channel 1/2 power too high\n",
9174 __func__);
9175
9176 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9177 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9178 dd_dev_info(dd,
9179 "%s: Cable RX channel 1/2 power too low\n",
9180 __func__);
9181
9182 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9183 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9184 dd_dev_info(dd,
9185 "%s: Cable RX channel 3/4 power too high\n",
9186 __func__);
9187
9188 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9189 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9190 dd_dev_info(dd,
9191 "%s: Cable RX channel 3/4 power too low\n",
9192 __func__);
9193
9194 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9195 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9196 dd_dev_info(dd,
9197 "%s: Cable TX channel 1/2 bias too high\n",
9198 __func__);
9199
9200 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9201 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9202 dd_dev_info(dd,
9203 "%s: Cable TX channel 1/2 bias too low\n",
9204 __func__);
9205
9206 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9207 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9208 dd_dev_info(dd,
9209 "%s: Cable TX channel 3/4 bias too high\n",
9210 __func__);
9211
9212 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9213 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9214 dd_dev_info(dd,
9215 "%s: Cable TX channel 3/4 bias too low\n",
9216 __func__);
9217
9218 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9219 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9220 dd_dev_info(dd,
9221 "%s: Cable TX channel 1/2 power too high\n",
9222 __func__);
9223
9224 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9225 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9226 dd_dev_info(dd,
9227 "%s: Cable TX channel 1/2 power too low\n",
9228 __func__);
9229
9230 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9231 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9232 dd_dev_info(dd,
9233 "%s: Cable TX channel 3/4 power too high\n",
9234 __func__);
9235
9236 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9237 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9238 dd_dev_info(dd,
9239 "%s: Cable TX channel 3/4 power too low\n",
9240 __func__);
9241
9242 /* Bytes 9-10 and 11-12 are reserved */
9243 /* Bytes 13-15 are vendor specific */
9244
9245 return 0;
9246}
9247
Mike Marciniszyn77241052015-07-30 15:17:43 -04009248/* This routine will only be scheduled if the QSFP module is present */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009249void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009250{
9251 struct qsfp_data *qd;
9252 struct hfi1_pportdata *ppd;
9253 struct hfi1_devdata *dd;
9254
9255 qd = container_of(work, struct qsfp_data, qsfp_work);
9256 ppd = qd->ppd;
9257 dd = ppd->dd;
9258
9259 /* Sanity check */
9260 if (!qsfp_mod_present(ppd))
9261 return;
9262
9263 /*
9264	 * Turn DC back on after the cable has been
9265 * re-inserted. Up until now, the DC has been in
9266 * reset to save power.
9267 */
9268 dc_start(dd);
9269
9270 if (qd->cache_refresh_required) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009271
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009272 set_qsfp_int_n(ppd, 0);
9273
9274 wait_for_qsfp_init(ppd);
9275
9276 /*
9277 * Allow INT_N to trigger the QSFP interrupt to watch
9278 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009279 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009280 set_qsfp_int_n(ppd, 1);
9281
9282 tune_serdes(ppd);
9283
9284 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009285 }
9286
9287 if (qd->check_interrupt_flags) {
9288 u8 qsfp_interrupt_status[16] = {0,};
9289
9290 if (qsfp_read(ppd, dd->hfi1_id, 6,
9291 &qsfp_interrupt_status[0], 16) != 16) {
9292 dd_dev_info(dd,
9293 "%s: Failed to read status of QSFP module\n",
9294 __func__);
9295 } else {
9296 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009297
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009298 handle_qsfp_error_conditions(
9299 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009300 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9301 ppd->qsfp_info.check_interrupt_flags = 0;
9302 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9303 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009304 }
9305 }
9306}
9307
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009308static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009309{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009310 struct hfi1_pportdata *ppd = dd->pport;
9311 u64 qsfp_mask, cce_int_mask;
9312 const int qsfp1_int_smask = QSFP1_INT % 64;
9313 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009314
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009315 /*
9316	 * Disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0.
9317	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR, so
9318	 * just one of QSFP1_INT/QSFP2_INT is needed to find the index of
9319	 * the appropriate CSR in the CCEIntMask CSR array.
9320 */
9321 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9322 (8 * (QSFP1_INT / 64)));
9323 if (dd->hfi1_id) {
9324 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9325 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9326 cce_int_mask);
9327 } else {
9328 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9329 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9330 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009331 }
9332
Mike Marciniszyn77241052015-07-30 15:17:43 -04009333 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9334 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009335 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9336 qsfp_mask);
9337 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9338 qsfp_mask);
9339
9340 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009341
9342 /* Handle active low nature of INT_N and MODPRST_N pins */
9343 if (qsfp_mod_present(ppd))
9344 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9345 write_csr(dd,
9346 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9347 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009348}
9349
Dean Luickbbdeb332015-12-01 15:38:15 -05009350/*
9351 * Do a one-time initialize of the LCB block.
9352 */
9353static void init_lcb(struct hfi1_devdata *dd)
9354{
Dean Luicka59329d2016-02-03 14:32:31 -08009355 /* simulator does not correctly handle LCB cclk loopback, skip */
9356 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9357 return;
9358
Dean Luickbbdeb332015-12-01 15:38:15 -05009359 /* the DC has been reset earlier in the driver load */
9360
9361 /* set LCB for cclk loopback on the port */
9362 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9363 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9364 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9365 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9366 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9367 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9368 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9369}
9370
Mike Marciniszyn77241052015-07-30 15:17:43 -04009371int bringup_serdes(struct hfi1_pportdata *ppd)
9372{
9373 struct hfi1_devdata *dd = ppd->dd;
9374 u64 guid;
9375 int ret;
9376
9377 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9378 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9379
9380 guid = ppd->guid;
9381 if (!guid) {
9382 if (dd->base_guid)
9383 guid = dd->base_guid + ppd->port - 1;
9384 ppd->guid = guid;
9385 }
9386
Mike Marciniszyn77241052015-07-30 15:17:43 -04009387 /* Set linkinit_reason on power up per OPA spec */
9388 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9389
Dean Luickbbdeb332015-12-01 15:38:15 -05009390 /* one-time init of the LCB */
9391 init_lcb(dd);
9392
Mike Marciniszyn77241052015-07-30 15:17:43 -04009393 if (loopback) {
9394 ret = init_loopback(dd);
9395 if (ret < 0)
9396 return ret;
9397 }
9398
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009399	/*
9400	 * Tune the SerDes to a ballpark setting for optimal signal and bit
9401	 * error rate.  This needs to be done before starting the link.
9402	 */
9403 tune_serdes(ppd);
9404
Mike Marciniszyn77241052015-07-30 15:17:43 -04009405 return start_link(ppd);
9406}
9407
9408void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9409{
9410 struct hfi1_devdata *dd = ppd->dd;
9411
9412 /*
9413	 * Shut down the link and keep it down. First clear the flag that says
9414	 * the driver wants to allow the link to be up (driver_link_ready).
9415 * Then make sure the link is not automatically restarted
9416 * (link_enabled). Cancel any pending restart. And finally
9417 * go offline.
9418 */
9419 ppd->driver_link_ready = 0;
9420 ppd->link_enabled = 0;
9421
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009422 ppd->offline_disabled_reason =
9423 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009424 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9425 OPA_LINKDOWN_REASON_SMA_DISABLED);
9426 set_link_state(ppd, HLS_DN_OFFLINE);
9427
9428 /* disable the port */
9429 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9430}
9431
9432static inline int init_cpu_counters(struct hfi1_devdata *dd)
9433{
9434 struct hfi1_pportdata *ppd;
9435 int i;
9436
9437 ppd = (struct hfi1_pportdata *)(dd + 1);
9438 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009439 ppd->ibport_data.rvp.rc_acks = NULL;
9440 ppd->ibport_data.rvp.rc_qacks = NULL;
9441 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9442 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9443 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9444 if (!ppd->ibport_data.rvp.rc_acks ||
9445 !ppd->ibport_data.rvp.rc_delayed_comp ||
9446 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009447 return -ENOMEM;
9448 }
9449
9450 return 0;
9451}
9452
9453static const char * const pt_names[] = {
9454 "expected",
9455 "eager",
9456 "invalid"
9457};
9458
9459static const char *pt_name(u32 type)
9460{
9461 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9462}
9463
9464/*
9465 * index is the index into the receive array
9466 */
9467void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9468 u32 type, unsigned long pa, u16 order)
9469{
9470 u64 reg;
9471 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9472 (dd->kregbase + RCV_ARRAY));
9473
9474 if (!(dd->flags & HFI1_PRESENT))
9475 goto done;
9476
9477 if (type == PT_INVALID) {
9478 pa = 0;
9479 } else if (type > PT_INVALID) {
9480 dd_dev_err(dd,
9481 "unexpected receive array type %u for index %u, not handled\n",
9482 type, index);
9483 goto done;
9484 }
9485
9486 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9487 pt_name(type), index, pa, (unsigned long)order);
9488
9489#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
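	/*
	 * Each RcvArray entry packs the write-enable bit, the buffer size
	 * (order) and the 4KB-aligned physical address; e.g. a buffer at
	 * pa 0x12345000 contributes address bits 0x12345 (pa >> 12).
	 */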
9490 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9491 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9492 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9493 << RCV_ARRAY_RT_ADDR_SHIFT;
9494 writeq(reg, base + (index * 8));
9495
9496 if (type == PT_EAGER)
9497 /*
9498 * Eager entries are written one-by-one so we have to push them
9499 * after we write the entry.
9500 */
9501 flush_wc();
9502done:
9503 return;
9504}
9505
9506void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9507{
9508 struct hfi1_devdata *dd = rcd->dd;
9509 u32 i;
9510
9511 /* this could be optimized */
9512 for (i = rcd->eager_base; i < rcd->eager_base +
9513 rcd->egrbufs.alloced; i++)
9514 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9515
9516 for (i = rcd->expected_base;
9517 i < rcd->expected_base + rcd->expected_count; i++)
9518 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9519}
9520
9521int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9522 struct hfi1_ctxt_info *kinfo)
9523{
9524 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9525 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9526 return 0;
9527}
9528
9529struct hfi1_message_header *hfi1_get_msgheader(
9530 struct hfi1_devdata *dd, __le32 *rhf_addr)
9531{
9532 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9533
9534 return (struct hfi1_message_header *)
9535 (rhf_addr - dd->rhf_offset + offset);
9536}
9537
9538static const char * const ib_cfg_name_strings[] = {
9539 "HFI1_IB_CFG_LIDLMC",
9540 "HFI1_IB_CFG_LWID_DG_ENB",
9541 "HFI1_IB_CFG_LWID_ENB",
9542 "HFI1_IB_CFG_LWID",
9543 "HFI1_IB_CFG_SPD_ENB",
9544 "HFI1_IB_CFG_SPD",
9545 "HFI1_IB_CFG_RXPOL_ENB",
9546 "HFI1_IB_CFG_LREV_ENB",
9547 "HFI1_IB_CFG_LINKLATENCY",
9548 "HFI1_IB_CFG_HRTBT",
9549 "HFI1_IB_CFG_OP_VLS",
9550 "HFI1_IB_CFG_VL_HIGH_CAP",
9551 "HFI1_IB_CFG_VL_LOW_CAP",
9552 "HFI1_IB_CFG_OVERRUN_THRESH",
9553 "HFI1_IB_CFG_PHYERR_THRESH",
9554 "HFI1_IB_CFG_LINKDEFAULT",
9555 "HFI1_IB_CFG_PKEYS",
9556 "HFI1_IB_CFG_MTU",
9557 "HFI1_IB_CFG_LSTATE",
9558 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9559 "HFI1_IB_CFG_PMA_TICKS",
9560 "HFI1_IB_CFG_PORT"
9561};
9562
9563static const char *ib_cfg_name(int which)
9564{
9565 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9566 return "invalid";
9567 return ib_cfg_name_strings[which];
9568}
9569
9570int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9571{
9572 struct hfi1_devdata *dd = ppd->dd;
9573 int val = 0;
9574
9575 switch (which) {
9576 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9577 val = ppd->link_width_enabled;
9578 break;
9579 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9580 val = ppd->link_width_active;
9581 break;
9582 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9583 val = ppd->link_speed_enabled;
9584 break;
9585 case HFI1_IB_CFG_SPD: /* current Link speed */
9586 val = ppd->link_speed_active;
9587 break;
9588
9589 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9590 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9591 case HFI1_IB_CFG_LINKLATENCY:
9592 goto unimplemented;
9593
9594 case HFI1_IB_CFG_OP_VLS:
9595 val = ppd->vls_operational;
9596 break;
9597 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9598 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9599 break;
9600 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9601 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9602 break;
9603 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9604 val = ppd->overrun_threshold;
9605 break;
9606 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9607 val = ppd->phy_error_threshold;
9608 break;
9609 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9610 val = dd->link_default;
9611 break;
9612
9613 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9614 case HFI1_IB_CFG_PMA_TICKS:
9615 default:
9616unimplemented:
9617 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9618 dd_dev_info(
9619 dd,
9620 "%s: which %s: not implemented\n",
9621 __func__,
9622 ib_cfg_name(which));
9623 break;
9624 }
9625
9626 return val;
9627}
9628
9629/*
9630 * The largest MAD packet size.
9631 */
9632#define MAX_MAD_PACKET 2048
9633
9634/*
9635 * Return the maximum header bytes that can go on the _wire_
9636 * for this device. This count includes the ICRC which is
9637 * not part of the packet held in memory but it is appended
9638 * by the HW.
9639 * This is dependent on the device's receive header entry size.
9640 * HFI allows this to be set per-receive context, but the
9641 * driver presently enforces a global value.
9642 */
9643u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9644{
9645 /*
9646 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9647 * the Receive Header Entry Size minus the PBC (or RHF) size
9648 * plus one DW for the ICRC appended by HW.
9649 *
9650 * dd->rcd[0].rcvhdrqentsize is in DW.
9651 * We use rcd[0] as all context will have the same value. Also,
9652 * the first kernel context would have been allocated by now so
9653 * we are guaranteed a valid value.
9654 */
9655 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9656}
9657
9658/*
9659 * Set Send Length
9660 * @ppd - per port data
9661 *
9662 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9663 * registers compare against LRH.PktLen, so use the max bytes included
9664 * in the LRH.
9665 *
9666 * This routine changes all VL values except VL15, which it maintains at
9667 * the same value.
9668 */
9669static void set_send_length(struct hfi1_pportdata *ppd)
9670{
9671 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009672 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9673 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009674 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9675 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9676 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9677 int i;
9678
9679 for (i = 0; i < ppd->vls_supported; i++) {
9680 if (dd->vld[i].mtu > maxvlmtu)
9681 maxvlmtu = dd->vld[i].mtu;
9682 if (i <= 3)
9683 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9684 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9685 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9686 else
9687 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9688 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9689 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9690 }
9691 write_csr(dd, SEND_LEN_CHECK0, len1);
9692 write_csr(dd, SEND_LEN_CHECK1, len2);
9693 /* adjust kernel credit return thresholds based on new MTUs */
9694 /* all kernel receive contexts have the same hdrqentsize */
9695 for (i = 0; i < ppd->vls_supported; i++) {
9696 sc_set_cr_threshold(dd->vld[i].sc,
9697 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9698 dd->rcd[0]->rcvhdrqentsize));
9699 }
9700 sc_set_cr_threshold(dd->vld[15].sc,
9701 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9702 dd->rcd[0]->rcvhdrqentsize));
9703
9704 /* Adjust maximum MTU for the port in DC */
9705 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9706 (ilog2(maxvlmtu >> 8) + 1);
9707 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9708 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9709 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9710 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9711 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9712}
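
/*
 * Illustrative sketch (assumed values, not a driver path): the per-VL value
 * programmed into SendLenCheck above is the largest allowed LRH.PktLen in
 * DW, i.e. (MTU + maximum wire header bytes) / 4.
 */
static inline u32 example_send_len_check_dw(u32 mtu, u32 max_hb)
{
	/* e.g. mtu = 4096, max_hb = 124 -> (4096 + 124) >> 2 = 1055 DW */
	return (mtu + max_hb) >> 2;
}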
9713
9714static void set_lidlmc(struct hfi1_pportdata *ppd)
9715{
9716 int i;
9717 u64 sreg = 0;
9718 struct hfi1_devdata *dd = ppd->dd;
9719 u32 mask = ~((1U << ppd->lmc) - 1);
9720 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9721
9722 if (dd->hfi1_snoop.mode_flag)
9723 dd_dev_info(dd, "Set lid/lmc while snooping");
9724
9725 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9726 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9727 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9728 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
9729 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9730 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9731 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9732
9733 /*
9734 * Iterate over all the send contexts and set their SLID check
9735 */
9736 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9737 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9738 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9739 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9740
9741 for (i = 0; i < dd->chip_send_contexts; i++) {
9742 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9743 i, (u32)sreg);
9744 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9745 }
9746
9747 /* Now we have to do the same thing for the sdma engines */
9748 sdma_update_lmc(dd, mask, ppd->lid);
9749}
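
/*
 * Illustrative sketch (assumed values, not a driver path): with an LMC of 2
 * the low two LID bits are don't-care, so the SLID check programmed above
 * accepts the whole block of four LIDs assigned to the port.
 */
static inline int example_slid_passes_check(u32 slid, u32 lid, u8 lmc)
{
	u32 mask = ~((1U << lmc) - 1);	/* e.g. lmc = 2 -> mask = 0xfffffffc */

	/* e.g. lid = 0x100: SLIDs 0x100 through 0x103 all pass */
	return (slid & mask) == (lid & mask);
}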
9750
9751static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9752{
9753 unsigned long timeout;
9754 u32 curr_state;
9755
9756 timeout = jiffies + msecs_to_jiffies(msecs);
9757 while (1) {
9758 curr_state = read_physical_state(dd);
9759 if (curr_state == state)
9760 break;
9761 if (time_after(jiffies, timeout)) {
9762 dd_dev_err(dd,
9763 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9764 state, curr_state);
9765 return -ETIMEDOUT;
9766 }
9767 usleep_range(1950, 2050); /* sleep 2ms-ish */
9768 }
9769
9770 return 0;
9771}
9772
9773/*
9774 * Helper for set_link_state(). Do not call except from that routine.
9775 * Expects ppd->hls_mutex to be held.
9776 *
9777 * @rem_reason value to be sent to the neighbor
9778 *
9779 * LinkDownReasons only set if transition succeeds.
9780 */
9781static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9782{
9783 struct hfi1_devdata *dd = ppd->dd;
9784 u32 pstate, previous_state;
9785 u32 last_local_state;
9786 u32 last_remote_state;
9787 int ret;
9788 int do_transition;
9789 int do_wait;
9790
9791 previous_state = ppd->host_link_state;
9792 ppd->host_link_state = HLS_GOING_OFFLINE;
9793 pstate = read_physical_state(dd);
9794 if (pstate == PLS_OFFLINE) {
9795 do_transition = 0; /* in right state */
9796 do_wait = 0; /* ...no need to wait */
9797 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9798 do_transition = 0; /* in an offline transient state */
9799 do_wait = 1; /* ...wait for it to settle */
9800 } else {
9801 do_transition = 1; /* need to move to offline */
9802 do_wait = 1; /* ...will need to wait */
9803 }
9804
9805 if (do_transition) {
9806 ret = set_physical_link_state(dd,
9807 PLS_OFFLINE | (rem_reason << 8));
9808
9809 if (ret != HCMD_SUCCESS) {
9810 dd_dev_err(dd,
9811 "Failed to transition to Offline link state, return %d\n",
9812 ret);
9813 return -EINVAL;
9814 }
Bryan Morgana9c05e32016-02-03 14:30:49 -08009815 if (ppd->offline_disabled_reason ==
9816 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -04009817 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -08009818 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009819 }
9820
9821 if (do_wait) {
9822 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -04009823 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009824 if (ret < 0)
9825 return ret;
9826 }
9827
9828 /* make sure the logical state is also down */
9829 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9830
9831 /*
9832 * Now in charge of LCB - must be after the physical state is
9833 * offline.quiet and before host_link_state is changed.
9834 */
9835 set_host_lcb_access(dd);
9836 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9837 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9838
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009839 if (ppd->port_type == PORT_TYPE_QSFP &&
9840 ppd->qsfp_info.limiting_active &&
9841 qsfp_mod_present(ppd)) {
9842 set_qsfp_tx(ppd, 0);
9843 }
9844
Mike Marciniszyn77241052015-07-30 15:17:43 -04009845 /*
9846 * The LNI has a mandatory wait time after the physical state
9847 * moves to Offline.Quiet. The wait time may be different
9848 * depending on how the link went down. The 8051 firmware
9849 * will observe the needed wait time and only move to ready
9850 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -05009851 * is 6s, so wait that long and then at least 0.5s more for
9852 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009853 */
Dean Luick05087f3b2015-12-01 15:38:16 -05009854 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009855 if (ret) {
9856 dd_dev_err(dd,
9857 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9858 /* state is really offline, so make it so */
9859 ppd->host_link_state = HLS_DN_OFFLINE;
9860 return ret;
9861 }
9862
9863 /*
9864 * The state is now offline and the 8051 is ready to accept host
9865 * requests.
9866 * - change our state
9867 * - notify others if we were previously in a linkup state
9868 */
9869 ppd->host_link_state = HLS_DN_OFFLINE;
9870 if (previous_state & HLS_UP) {
9871 /* went down while link was up */
9872 handle_linkup_change(dd, 0);
9873 } else if (previous_state
9874 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9875 /* went down while attempting link up */
9876 /* byte 1 of last_*_state is the failure reason */
9877 read_last_local_state(dd, &last_local_state);
9878 read_last_remote_state(dd, &last_remote_state);
9879 dd_dev_err(dd,
9880 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9881 last_local_state, last_remote_state);
9882 }
9883
9884 /* the active link width (downgrade) is 0 on link down */
9885 ppd->link_width_active = 0;
9886 ppd->link_width_downgrade_tx_active = 0;
9887 ppd->link_width_downgrade_rx_active = 0;
9888 ppd->current_egress_rate = 0;
9889 return 0;
9890}
9891
9892/* return the link state name */
9893static const char *link_state_name(u32 state)
9894{
9895 const char *name;
9896 int n = ilog2(state);
9897 static const char * const names[] = {
9898 [__HLS_UP_INIT_BP] = "INIT",
9899 [__HLS_UP_ARMED_BP] = "ARMED",
9900 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9901 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9902 [__HLS_DN_POLL_BP] = "POLL",
9903 [__HLS_DN_DISABLE_BP] = "DISABLE",
9904 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9905 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9906 [__HLS_GOING_UP_BP] = "GOING_UP",
9907 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9908 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9909 };
9910
9911 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9912 return name ? name : "unknown";
9913}
9914
9915/* return the link state reason name */
9916static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9917{
9918 if (state == HLS_UP_INIT) {
9919 switch (ppd->linkinit_reason) {
9920 case OPA_LINKINIT_REASON_LINKUP:
9921 return "(LINKUP)";
9922 case OPA_LINKINIT_REASON_FLAPPING:
9923 return "(FLAPPING)";
9924 case OPA_LINKINIT_OUTSIDE_POLICY:
9925 return "(OUTSIDE_POLICY)";
9926 case OPA_LINKINIT_QUARANTINED:
9927 return "(QUARANTINED)";
9928 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9929 return "(INSUFIC_CAPABILITY)";
9930 default:
9931 break;
9932 }
9933 }
9934 return "";
9935}
9936
9937/*
9938 * driver_physical_state - convert the driver's notion of a port's
9939 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9940 * Return -1 (converted to a u32) to indicate error.
9941 */
9942u32 driver_physical_state(struct hfi1_pportdata *ppd)
9943{
9944 switch (ppd->host_link_state) {
9945 case HLS_UP_INIT:
9946 case HLS_UP_ARMED:
9947 case HLS_UP_ACTIVE:
9948 return IB_PORTPHYSSTATE_LINKUP;
9949 case HLS_DN_POLL:
9950 return IB_PORTPHYSSTATE_POLLING;
9951 case HLS_DN_DISABLE:
9952 return IB_PORTPHYSSTATE_DISABLED;
9953 case HLS_DN_OFFLINE:
9954 return OPA_PORTPHYSSTATE_OFFLINE;
9955 case HLS_VERIFY_CAP:
9956 return IB_PORTPHYSSTATE_POLLING;
9957 case HLS_GOING_UP:
9958 return IB_PORTPHYSSTATE_POLLING;
9959 case HLS_GOING_OFFLINE:
9960 return OPA_PORTPHYSSTATE_OFFLINE;
9961 case HLS_LINK_COOLDOWN:
9962 return OPA_PORTPHYSSTATE_OFFLINE;
9963 case HLS_DN_DOWNDEF:
9964 default:
9965 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9966 ppd->host_link_state);
9967 return -1;
9968 }
9969}
9970
9971/*
9972 * driver_logical_state - convert the driver's notion of a port's
9973 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9974 * (converted to a u32) to indicate error.
9975 */
9976u32 driver_logical_state(struct hfi1_pportdata *ppd)
9977{
9978 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9979 return IB_PORT_DOWN;
9980
9981 switch (ppd->host_link_state & HLS_UP) {
9982 case HLS_UP_INIT:
9983 return IB_PORT_INIT;
9984 case HLS_UP_ARMED:
9985 return IB_PORT_ARMED;
9986 case HLS_UP_ACTIVE:
9987 return IB_PORT_ACTIVE;
9988 default:
9989 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9990 ppd->host_link_state);
9991 return -1;
9992 }
9993}
9994
9995void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9996 u8 neigh_reason, u8 rem_reason)
9997{
9998 if (ppd->local_link_down_reason.latest == 0 &&
9999 ppd->neigh_link_down_reason.latest == 0) {
10000 ppd->local_link_down_reason.latest = lcl_reason;
10001 ppd->neigh_link_down_reason.latest = neigh_reason;
10002 ppd->remote_link_down_reason = rem_reason;
10003 }
10004}
10005
10006/*
10007 * Change the physical and/or logical link state.
10008 *
10009 * Do not call this routine while inside an interrupt. It contains
10010 * calls to routines that can take multiple seconds to finish.
10011 *
10012 * Returns 0 on success, -errno on failure.
10013 */
10014int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10015{
10016 struct hfi1_devdata *dd = ppd->dd;
10017 struct ib_event event = {.device = NULL};
10018 int ret1, ret = 0;
10019 int was_up, is_down;
10020 int orig_new_state, poll_bounce;
10021
10022 mutex_lock(&ppd->hls_lock);
10023
10024 orig_new_state = state;
10025 if (state == HLS_DN_DOWNDEF)
10026 state = dd->link_default;
10027
10028 /* interpret poll -> poll as a link bounce */
10029 poll_bounce = ppd->host_link_state == HLS_DN_POLL
10030 && state == HLS_DN_POLL;
10031
10032 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10033 link_state_name(ppd->host_link_state),
10034 link_state_name(orig_new_state),
10035 poll_bounce ? "(bounce) " : "",
10036 link_state_reason_name(ppd, state));
10037
10038 was_up = !!(ppd->host_link_state & HLS_UP);
10039
10040 /*
10041 * If we're going to a (HLS_*) link state that implies the logical
10042 * link state is neither IB_PORT_ARMED nor IB_PORT_ACTIVE, then
10043 * reset is_sm_config_started to 0.
10044 */
10045 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10046 ppd->is_sm_config_started = 0;
10047
10048 /*
10049 * Do nothing if the states match. Let a poll to poll link bounce
10050 * go through.
10051 */
10052 if (ppd->host_link_state == state && !poll_bounce)
10053 goto done;
10054
10055 switch (state) {
10056 case HLS_UP_INIT:
10057 if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
10058 || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10059 /*
10060 * Quick link up jumps from polling to here.
10061 *
10062 * Whether in normal or loopback mode, the
10063 * simulator jumps from polling to link up.
10064 * Accept that here.
10065 */
10066 /* OK */;
10067 } else if (ppd->host_link_state != HLS_GOING_UP) {
10068 goto unexpected;
10069 }
10070
10071 ppd->host_link_state = HLS_UP_INIT;
10072 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10073 if (ret) {
10074 /* logical state didn't change, stay at going_up */
10075 ppd->host_link_state = HLS_GOING_UP;
10076 dd_dev_err(dd,
10077 "%s: logical state did not change to INIT\n",
10078 __func__);
10079 } else {
10080 /* clear old transient LINKINIT_REASON code */
10081 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10082 ppd->linkinit_reason =
10083 OPA_LINKINIT_REASON_LINKUP;
10084
10085 /* enable the port */
10086 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10087
10088 handle_linkup_change(dd, 1);
10089 }
10090 break;
10091 case HLS_UP_ARMED:
10092 if (ppd->host_link_state != HLS_UP_INIT)
10093 goto unexpected;
10094
10095 ppd->host_link_state = HLS_UP_ARMED;
10096 set_logical_state(dd, LSTATE_ARMED);
10097 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10098 if (ret) {
10099 /* logical state didn't change, stay at init */
10100 ppd->host_link_state = HLS_UP_INIT;
10101 dd_dev_err(dd,
10102 "%s: logical state did not change to ARMED\n",
10103 __func__);
10104 }
10105 /*
10106 * The simulator does not currently implement SMA messages,
10107 * so neighbor_normal is not set. Set it here when we first
10108 * move to Armed.
10109 */
10110 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10111 ppd->neighbor_normal = 1;
10112 break;
10113 case HLS_UP_ACTIVE:
10114 if (ppd->host_link_state != HLS_UP_ARMED)
10115 goto unexpected;
10116
10117 ppd->host_link_state = HLS_UP_ACTIVE;
10118 set_logical_state(dd, LSTATE_ACTIVE);
10119 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10120 if (ret) {
10121 /* logical state didn't change, stay at armed */
10122 ppd->host_link_state = HLS_UP_ARMED;
10123 dd_dev_err(dd,
10124 "%s: logical state did not change to ACTIVE\n",
10125 __func__);
10126 } else {
10127
10128 /* tell all engines to go running */
10129 sdma_all_running(dd);
10130
10131			/* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010132 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010133 event.element.port_num = ppd->port;
10134 event.event = IB_EVENT_PORT_ACTIVE;
10135 }
10136 break;
10137 case HLS_DN_POLL:
10138 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10139 ppd->host_link_state == HLS_DN_OFFLINE) &&
10140 dd->dc_shutdown)
10141 dc_start(dd);
10142 /* Hand LED control to the DC */
10143 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10144
10145 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10146 u8 tmp = ppd->link_enabled;
10147
10148 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10149 if (ret) {
10150 ppd->link_enabled = tmp;
10151 break;
10152 }
10153 ppd->remote_link_down_reason = 0;
10154
10155 if (ppd->driver_link_ready)
10156 ppd->link_enabled = 1;
10157 }
10158
Jim Snowfb9036d2016-01-11 18:32:21 -050010159 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010160 ret = set_local_link_attributes(ppd);
10161 if (ret)
10162 break;
10163
10164 ppd->port_error_action = 0;
10165 ppd->host_link_state = HLS_DN_POLL;
10166
10167 if (quick_linkup) {
10168 /* quick linkup does not go into polling */
10169 ret = do_quick_linkup(dd);
10170 } else {
10171 ret1 = set_physical_link_state(dd, PLS_POLLING);
10172 if (ret1 != HCMD_SUCCESS) {
10173 dd_dev_err(dd,
10174 "Failed to transition to Polling link state, return 0x%x\n",
10175 ret1);
10176 ret = -EINVAL;
10177 }
10178 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010179 ppd->offline_disabled_reason =
10180 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010181 /*
10182 * If an error occurred above, go back to offline. The
10183 * caller may reschedule another attempt.
10184 */
10185 if (ret)
10186 goto_offline(ppd, 0);
10187 break;
10188 case HLS_DN_DISABLE:
10189 /* link is disabled */
10190 ppd->link_enabled = 0;
10191
10192 /* allow any state to transition to disabled */
10193
10194 /* must transition to offline first */
10195 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10196 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10197 if (ret)
10198 break;
10199 ppd->remote_link_down_reason = 0;
10200 }
10201
10202 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10203 if (ret1 != HCMD_SUCCESS) {
10204 dd_dev_err(dd,
10205 "Failed to transition to Disabled link state, return 0x%x\n",
10206 ret1);
10207 ret = -EINVAL;
10208 break;
10209 }
10210 ppd->host_link_state = HLS_DN_DISABLE;
10211 dc_shutdown(dd);
10212 break;
10213 case HLS_DN_OFFLINE:
10214 if (ppd->host_link_state == HLS_DN_DISABLE)
10215 dc_start(dd);
10216
10217 /* allow any state to transition to offline */
10218 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10219 if (!ret)
10220 ppd->remote_link_down_reason = 0;
10221 break;
10222 case HLS_VERIFY_CAP:
10223 if (ppd->host_link_state != HLS_DN_POLL)
10224 goto unexpected;
10225 ppd->host_link_state = HLS_VERIFY_CAP;
10226 break;
10227 case HLS_GOING_UP:
10228 if (ppd->host_link_state != HLS_VERIFY_CAP)
10229 goto unexpected;
10230
10231 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10232 if (ret1 != HCMD_SUCCESS) {
10233 dd_dev_err(dd,
10234 "Failed to transition to link up state, return 0x%x\n",
10235 ret1);
10236 ret = -EINVAL;
10237 break;
10238 }
10239 ppd->host_link_state = HLS_GOING_UP;
10240 break;
10241
10242 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10243 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10244 default:
10245 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10246 __func__, state);
10247 ret = -EINVAL;
10248 break;
10249 }
10250
10251 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10252 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10253
10254 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10255 ppd->neigh_link_down_reason.sma == 0) {
10256 ppd->local_link_down_reason.sma =
10257 ppd->local_link_down_reason.latest;
10258 ppd->neigh_link_down_reason.sma =
10259 ppd->neigh_link_down_reason.latest;
10260 }
10261
10262 goto done;
10263
10264unexpected:
10265 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10266 __func__, link_state_name(ppd->host_link_state),
10267 link_state_name(state));
10268 ret = -EINVAL;
10269
10270done:
10271 mutex_unlock(&ppd->hls_lock);
10272
10273 if (event.device)
10274 ib_dispatch_event(&event);
10275
10276 return ret;
10277}
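
/*
 * Illustrative usage sketch (an assumption, not an actual driver call path):
 * a typical bring-up requests Polling and, once the 8051 reports the LNI
 * transitions (VERIFY_CAP/GOING_UP), walks the logical states up to Active.
 * Return values and error handling are omitted for brevity.
 */
static inline void example_bring_link_up(struct hfi1_pportdata *ppd)
{
	set_link_state(ppd, HLS_DN_POLL);	/* start link training */

	/* later, after the link-up events have been handled: */
	set_link_state(ppd, HLS_UP_INIT);
	set_link_state(ppd, HLS_UP_ARMED);
	set_link_state(ppd, HLS_UP_ACTIVE);
}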
10278
10279int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10280{
10281 u64 reg;
10282 int ret = 0;
10283
10284 switch (which) {
10285 case HFI1_IB_CFG_LIDLMC:
10286 set_lidlmc(ppd);
10287 break;
10288 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10289 /*
10290 * The VL Arbitrator high limit is sent in units of 4k
10291 * bytes, while HFI stores it in units of 64 bytes.
10292 */
10293 val *= 4096/64;
10294 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10295 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10296 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10297 break;
10298 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10299 /* HFI only supports POLL as the default link down state */
10300 if (val != HLS_DN_POLL)
10301 ret = -EINVAL;
10302 break;
10303 case HFI1_IB_CFG_OP_VLS:
10304 if (ppd->vls_operational != val) {
10305 ppd->vls_operational = val;
10306 if (!ppd->port)
10307 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010308 }
10309 break;
10310 /*
10311 * For link width, link width downgrade, and speed enable, always AND
10312 * the setting with what is actually supported. This has two benefits.
10313 * First, enabled can't have unsupported values, no matter what the
10314 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10315 * "fill in with your supported value" have all the bits in the
10316 * field set, so simply ANDing with supported has the desired result.
10317 */
10318 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10319 ppd->link_width_enabled = val & ppd->link_width_supported;
10320 break;
10321 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10322 ppd->link_width_downgrade_enabled =
10323 val & ppd->link_width_downgrade_supported;
10324 break;
10325 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10326 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10327 break;
10328 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10329 /*
10330		 * HFI does not follow IB specs; save this value
10331		 * so we can report it, if asked.
10332 */
10333 ppd->overrun_threshold = val;
10334 break;
10335 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10336 /*
10337		 * HFI does not follow IB specs; save this value
10338		 * so we can report it, if asked.
10339 */
10340 ppd->phy_error_threshold = val;
10341 break;
10342
10343 case HFI1_IB_CFG_MTU:
10344 set_send_length(ppd);
10345 break;
10346
10347 case HFI1_IB_CFG_PKEYS:
10348 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10349 set_partition_keys(ppd);
10350 break;
10351
10352 default:
10353 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10354 dd_dev_info(ppd->dd,
10355 "%s: which %s, val 0x%x: not implemented\n",
10356 __func__, ib_cfg_name(which), val);
10357 break;
10358 }
10359 return ret;
10360}
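
/*
 * Illustrative arithmetic (assumed input, not from the driver): the FM
 * supplies HFI1_IB_CFG_VL_HIGH_LIMIT in units of 4 KB while the CSR counts
 * 64 byte units, hence the val *= 4096/64 scaling in hfi1_set_ib_cfg().
 */
static inline u32 example_vl_high_limit_csr_units(u32 fm_val)
{
	/* e.g. fm_val = 2 (8 KB) -> 2 * 64 = 128 CSR units */
	return fm_val * (4096 / 64);
}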
10361
10362/* begin functions related to vl arbitration table caching */
10363static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10364{
10365 int i;
10366
10367 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10368 VL_ARB_LOW_PRIO_TABLE_SIZE);
10369 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10370 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10371
10372 /*
10373 * Note that we always return values directly from the
10374 * 'vl_arb_cache' (and do no CSR reads) in response to a
10375 * 'Get(VLArbTable)'. This is obviously correct after a
10376 * 'Set(VLArbTable)', since the cache will then be up to
10377 * date. But it's also correct prior to any 'Set(VLArbTable)'
10378 * since then both the cache, and the relevant h/w registers
10379 * will be zeroed.
10380 */
10381
10382 for (i = 0; i < MAX_PRIO_TABLE; i++)
10383 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10384}
10385
10386/*
10387 * vl_arb_lock_cache
10388 *
10389 * All other vl_arb_* functions should be called only after locking
10390 * the cache.
10391 */
10392static inline struct vl_arb_cache *
10393vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10394{
10395 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10396 return NULL;
10397 spin_lock(&ppd->vl_arb_cache[idx].lock);
10398 return &ppd->vl_arb_cache[idx];
10399}
10400
10401static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10402{
10403 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10404}
10405
10406static void vl_arb_get_cache(struct vl_arb_cache *cache,
10407 struct ib_vl_weight_elem *vl)
10408{
10409 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10410}
10411
10412static void vl_arb_set_cache(struct vl_arb_cache *cache,
10413 struct ib_vl_weight_elem *vl)
10414{
10415 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10416}
10417
10418static int vl_arb_match_cache(struct vl_arb_cache *cache,
10419 struct ib_vl_weight_elem *vl)
10420{
10421 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10422}
10423/* end functions related to vl arbitration table caching */
10424
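/*
 * Illustrative usage sketch (an assumption, not a driver path): all reads of
 * the cached VL arbitration tables follow the lock/get/unlock pattern above.
 */
static inline void example_read_low_prio_cache(struct hfi1_pportdata *ppd,
					       struct ib_vl_weight_elem *vl)
{
	struct vl_arb_cache *vlc;

	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
	if (!vlc)
		return;
	vl_arb_get_cache(vlc, vl);
	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
}
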
10425static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10426 u32 size, struct ib_vl_weight_elem *vl)
10427{
10428 struct hfi1_devdata *dd = ppd->dd;
10429 u64 reg;
10430 unsigned int i, is_up = 0;
10431 int drain, ret = 0;
10432
10433 mutex_lock(&ppd->hls_lock);
10434
10435 if (ppd->host_link_state & HLS_UP)
10436 is_up = 1;
10437
10438 drain = !is_ax(dd) && is_up;
10439
10440 if (drain)
10441 /*
10442 * Before adjusting VL arbitration weights, empty per-VL
10443 * FIFOs, otherwise a packet whose VL weight is being
10444 * set to 0 could get stuck in a FIFO with no chance to
10445 * egress.
10446 */
10447 ret = stop_drain_data_vls(dd);
10448
10449 if (ret) {
10450 dd_dev_err(
10451 dd,
10452 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10453 __func__);
10454 goto err;
10455 }
10456
10457 for (i = 0; i < size; i++, vl++) {
10458 /*
10459 * NOTE: The low priority shift and mask are used here, but
10460 * they are the same for both the low and high registers.
10461 */
10462 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10463 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10464 | (((u64)vl->weight
10465 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10466 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10467 write_csr(dd, target + (i * 8), reg);
10468 }
10469 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10470
10471 if (drain)
10472 open_fill_data_vls(dd); /* reopen all VLs */
10473
10474err:
10475 mutex_unlock(&ppd->hls_lock);
10476
10477 return ret;
10478}
10479
10480/*
10481 * Read one credit merge VL register.
10482 */
10483static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10484 struct vl_limit *vll)
10485{
10486 u64 reg = read_csr(dd, csr);
10487
10488 vll->dedicated = cpu_to_be16(
10489 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10490 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10491 vll->shared = cpu_to_be16(
10492 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10493 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10494}
10495
10496/*
10497 * Read the current credit merge limits.
10498 */
10499static int get_buffer_control(struct hfi1_devdata *dd,
10500 struct buffer_control *bc, u16 *overall_limit)
10501{
10502 u64 reg;
10503 int i;
10504
10505 /* not all entries are filled in */
10506 memset(bc, 0, sizeof(*bc));
10507
10508 /* OPA and HFI have a 1-1 mapping */
10509 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10510 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
10511
10512 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10513 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10514
10515 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10516 bc->overall_shared_limit = cpu_to_be16(
10517 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10518 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10519 if (overall_limit)
10520 *overall_limit = (reg
10521 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10522 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10523 return sizeof(struct buffer_control);
10524}
10525
10526static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10527{
10528 u64 reg;
10529 int i;
10530
10531 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10532 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10533 for (i = 0; i < sizeof(u64); i++) {
10534 u8 byte = *(((u8 *)&reg) + i);
10535
10536 dp->vlnt[2 * i] = byte & 0xf;
10537 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10538 }
10539
10540 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10541 for (i = 0; i < sizeof(u64); i++) {
10542 u8 byte = *(((u8 *)&reg) + i);
10543
10544 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10545 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10546 }
10547 return sizeof(struct sc2vlnt);
10548}
10549
10550static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10551 struct ib_vl_weight_elem *vl)
10552{
10553 unsigned int i;
10554
10555 for (i = 0; i < nelems; i++, vl++) {
10556 vl->vl = 0xf;
10557 vl->weight = 0;
10558 }
10559}
10560
10561static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10562{
10563 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10564 DC_SC_VL_VAL(15_0,
10565 0, dp->vlnt[0] & 0xf,
10566 1, dp->vlnt[1] & 0xf,
10567 2, dp->vlnt[2] & 0xf,
10568 3, dp->vlnt[3] & 0xf,
10569 4, dp->vlnt[4] & 0xf,
10570 5, dp->vlnt[5] & 0xf,
10571 6, dp->vlnt[6] & 0xf,
10572 7, dp->vlnt[7] & 0xf,
10573 8, dp->vlnt[8] & 0xf,
10574 9, dp->vlnt[9] & 0xf,
10575 10, dp->vlnt[10] & 0xf,
10576 11, dp->vlnt[11] & 0xf,
10577 12, dp->vlnt[12] & 0xf,
10578 13, dp->vlnt[13] & 0xf,
10579 14, dp->vlnt[14] & 0xf,
10580 15, dp->vlnt[15] & 0xf));
10581 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10582 DC_SC_VL_VAL(31_16,
10583 16, dp->vlnt[16] & 0xf,
10584 17, dp->vlnt[17] & 0xf,
10585 18, dp->vlnt[18] & 0xf,
10586 19, dp->vlnt[19] & 0xf,
10587 20, dp->vlnt[20] & 0xf,
10588 21, dp->vlnt[21] & 0xf,
10589 22, dp->vlnt[22] & 0xf,
10590 23, dp->vlnt[23] & 0xf,
10591 24, dp->vlnt[24] & 0xf,
10592 25, dp->vlnt[25] & 0xf,
10593 26, dp->vlnt[26] & 0xf,
10594 27, dp->vlnt[27] & 0xf,
10595 28, dp->vlnt[28] & 0xf,
10596 29, dp->vlnt[29] & 0xf,
10597 30, dp->vlnt[30] & 0xf,
10598 31, dp->vlnt[31] & 0xf));
10599}
10600
10601static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10602 u16 limit)
10603{
10604 if (limit != 0)
10605 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10606 what, (int)limit, idx);
10607}
10608
10609/* change only the shared limit portion of SendCmGlobalCredit */
10610static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10611{
10612 u64 reg;
10613
10614 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10615 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10616 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10617 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10618}
10619
10620/* change only the total credit limit portion of SendCmGlobalCredit */
10621static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10622{
10623 u64 reg;
10624
10625 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10626 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10627 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10628 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10629}
10630
10631/* set the given per-VL shared limit */
10632static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10633{
10634 u64 reg;
10635 u32 addr;
10636
10637 if (vl < TXE_NUM_DATA_VL)
10638 addr = SEND_CM_CREDIT_VL + (8 * vl);
10639 else
10640 addr = SEND_CM_CREDIT_VL15;
10641
10642 reg = read_csr(dd, addr);
10643 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10644 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10645 write_csr(dd, addr, reg);
10646}
10647
10648/* set the given per-VL dedicated limit */
10649static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10650{
10651 u64 reg;
10652 u32 addr;
10653
10654 if (vl < TXE_NUM_DATA_VL)
10655 addr = SEND_CM_CREDIT_VL + (8 * vl);
10656 else
10657 addr = SEND_CM_CREDIT_VL15;
10658
10659 reg = read_csr(dd, addr);
10660 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10661 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10662 write_csr(dd, addr, reg);
10663}
10664
10665/* spin until the given per-VL status mask bits clear */
10666static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10667 const char *which)
10668{
10669 unsigned long timeout;
10670 u64 reg;
10671
10672 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10673 while (1) {
10674 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10675
10676 if (reg == 0)
10677 return; /* success */
10678 if (time_after(jiffies, timeout))
10679 break; /* timed out */
10680 udelay(1);
10681 }
10682
10683 dd_dev_err(dd,
10684 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10685 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10686 /*
10687 * If this occurs, it is likely there was a credit loss on the link.
10688 * The only recovery from that is a link bounce.
10689 */
10690 dd_dev_err(dd,
10691 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10692}
10693
10694/*
10695 * The number of credits on the VLs may be changed while everything
10696 * is "live", but the following algorithm must be followed due to
10697 * how the hardware is actually implemented. In particular,
10698 * Return_Credit_Status[] is the only correct status check.
10699 *
10700 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10701 * set Global_Shared_Credit_Limit = 0
10702 * use_all_vl = 1
10703 * mask0 = all VLs that are changing either dedicated or shared limits
10704 * set Shared_Limit[mask0] = 0
10705 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10706 * if (changing any dedicated limit)
10707 * mask1 = all VLs that are lowering dedicated limits
10708 * lower Dedicated_Limit[mask1]
10709 * spin until Return_Credit_Status[mask1] == 0
10710 * raise Dedicated_Limits
10711 * raise Shared_Limits
10712 * raise Global_Shared_Credit_Limit
10713 *
10714 * lower = if the new limit is lower, set the limit to the new value
10715 * raise = if the new limit is higher than the current value (may be changed
10716 * earlier in the algorithm), set the new limit to the new value
10717 */
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010718int set_buffer_control(struct hfi1_pportdata *ppd,
10719 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040010720{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010721 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010722 u64 changing_mask, ld_mask, stat_mask;
10723 int change_count;
10724 int i, use_all_mask;
10725 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010726 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010727 /*
10728 * A0: add the variable any_shared_limit_changing below and in the
10729 * algorithm above. If removing A0 support, it can be removed.
10730 */
10731 int any_shared_limit_changing;
10732 struct buffer_control cur_bc;
10733 u8 changing[OPA_MAX_VLS];
10734 u8 lowering_dedicated[OPA_MAX_VLS];
10735 u16 cur_total;
10736 u32 new_total = 0;
10737 const u64 all_mask =
10738 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10739 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10740 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10741 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10742 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10743 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10744 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10745 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10746 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10747
10748#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10749#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10750
10751
10752 /* find the new total credits, do sanity check on unused VLs */
10753 for (i = 0; i < OPA_MAX_VLS; i++) {
10754 if (valid_vl(i)) {
10755 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10756 continue;
10757 }
10758 nonzero_msg(dd, i, "dedicated",
10759 be16_to_cpu(new_bc->vl[i].dedicated));
10760 nonzero_msg(dd, i, "shared",
10761 be16_to_cpu(new_bc->vl[i].shared));
10762 new_bc->vl[i].dedicated = 0;
10763 new_bc->vl[i].shared = 0;
10764 }
10765 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050010766
Mike Marciniszyn77241052015-07-30 15:17:43 -040010767 /* fetch the current values */
10768 get_buffer_control(dd, &cur_bc, &cur_total);
10769
10770 /*
10771 * Create the masks we will use.
10772 */
10773 memset(changing, 0, sizeof(changing));
10774 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10775	/* NOTE: Assumes that the individual VL bits are adjacent and in
10776	 * increasing order */
10777 stat_mask =
10778 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10779 changing_mask = 0;
10780 ld_mask = 0;
10781 change_count = 0;
10782 any_shared_limit_changing = 0;
10783 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10784 if (!valid_vl(i))
10785 continue;
10786 this_shared_changing = new_bc->vl[i].shared
10787 != cur_bc.vl[i].shared;
10788 if (this_shared_changing)
10789 any_shared_limit_changing = 1;
10790 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
10791 || this_shared_changing) {
10792 changing[i] = 1;
10793 changing_mask |= stat_mask;
10794 change_count++;
10795 }
10796 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10797 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10798 lowering_dedicated[i] = 1;
10799 ld_mask |= stat_mask;
10800 }
10801 }
10802
10803 /* bracket the credit change with a total adjustment */
10804 if (new_total > cur_total)
10805 set_global_limit(dd, new_total);
10806
10807 /*
10808 * Start the credit change algorithm.
10809 */
10810 use_all_mask = 0;
10811 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010812 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10813 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010814 set_global_shared(dd, 0);
10815 cur_bc.overall_shared_limit = 0;
10816 use_all_mask = 1;
10817 }
10818
10819 for (i = 0; i < NUM_USABLE_VLS; i++) {
10820 if (!valid_vl(i))
10821 continue;
10822
10823 if (changing[i]) {
10824 set_vl_shared(dd, i, 0);
10825 cur_bc.vl[i].shared = 0;
10826 }
10827 }
10828
10829 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10830 "shared");
10831
10832 if (change_count > 0) {
10833 for (i = 0; i < NUM_USABLE_VLS; i++) {
10834 if (!valid_vl(i))
10835 continue;
10836
10837 if (lowering_dedicated[i]) {
10838 set_vl_dedicated(dd, i,
10839 be16_to_cpu(new_bc->vl[i].dedicated));
10840 cur_bc.vl[i].dedicated =
10841 new_bc->vl[i].dedicated;
10842 }
10843 }
10844
10845 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10846
10847 /* now raise all dedicated that are going up */
10848 for (i = 0; i < NUM_USABLE_VLS; i++) {
10849 if (!valid_vl(i))
10850 continue;
10851
10852 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10853 be16_to_cpu(cur_bc.vl[i].dedicated))
10854 set_vl_dedicated(dd, i,
10855 be16_to_cpu(new_bc->vl[i].dedicated));
10856 }
10857 }
10858
10859 /* next raise all shared that are going up */
10860 for (i = 0; i < NUM_USABLE_VLS; i++) {
10861 if (!valid_vl(i))
10862 continue;
10863
10864 if (be16_to_cpu(new_bc->vl[i].shared) >
10865 be16_to_cpu(cur_bc.vl[i].shared))
10866 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10867 }
10868
10869 /* finally raise the global shared */
10870 if (be16_to_cpu(new_bc->overall_shared_limit) >
10871 be16_to_cpu(cur_bc.overall_shared_limit))
10872 set_global_shared(dd,
10873 be16_to_cpu(new_bc->overall_shared_limit));
10874
10875 /* bracket the credit change with a total adjustment */
10876 if (new_total < cur_total)
10877 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010878
10879 /*
10880 * Determine the actual number of operational VLS using the number of
10881 * dedicated and shared credits for each VL.
10882 */
10883 if (change_count > 0) {
10884 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10885 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10886 be16_to_cpu(new_bc->vl[i].shared) > 0)
10887 vl_count++;
10888 ppd->actual_vls_operational = vl_count;
10889 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
10890 ppd->actual_vls_operational :
10891 ppd->vls_operational,
10892 NULL);
10893 if (ret == 0)
10894 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
10895 ppd->actual_vls_operational :
10896 ppd->vls_operational, NULL);
10897 if (ret)
10898 return ret;
10899 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040010900 return 0;
10901}
10902
10903/*
10904 * Read the given fabric manager table. Return the size of the
10905 * table (in bytes) on success, and a negative error code on
10906 * failure.
10907 */
10908int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10909
10910{
10911 int size;
10912 struct vl_arb_cache *vlc;
10913
10914 switch (which) {
10915 case FM_TBL_VL_HIGH_ARB:
10916 size = 256;
10917 /*
10918 * OPA specifies 128 elements (of 2 bytes each), though
10919 * HFI supports only 16 elements in h/w.
10920 */
10921 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10922 vl_arb_get_cache(vlc, t);
10923 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10924 break;
10925 case FM_TBL_VL_LOW_ARB:
10926 size = 256;
10927 /*
10928 * OPA specifies 128 elements (of 2 bytes each), though
10929 * HFI supports only 16 elements in h/w.
10930 */
10931 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10932 vl_arb_get_cache(vlc, t);
10933 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10934 break;
10935 case FM_TBL_BUFFER_CONTROL:
10936 size = get_buffer_control(ppd->dd, t, NULL);
10937 break;
10938 case FM_TBL_SC2VLNT:
10939 size = get_sc2vlnt(ppd->dd, t);
10940 break;
10941 case FM_TBL_VL_PREEMPT_ELEMS:
10942 size = 256;
10943 /* OPA specifies 128 elements, of 2 bytes each */
10944 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10945 break;
10946 case FM_TBL_VL_PREEMPT_MATRIX:
10947 size = 256;
10948 /*
10949 * OPA specifies that this is the same size as the VL
10950 * arbitration tables (i.e., 256 bytes).
10951 */
10952 break;
10953 default:
10954 return -EINVAL;
10955 }
10956 return size;
10957}
10958
10959/*
10960 * Write the given fabric manager table.
10961 */
10962int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10963{
10964 int ret = 0;
10965 struct vl_arb_cache *vlc;
10966
10967 switch (which) {
10968 case FM_TBL_VL_HIGH_ARB:
10969 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10970 if (vl_arb_match_cache(vlc, t)) {
10971 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10972 break;
10973 }
10974 vl_arb_set_cache(vlc, t);
10975 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10976 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10977 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10978 break;
10979 case FM_TBL_VL_LOW_ARB:
10980 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10981 if (vl_arb_match_cache(vlc, t)) {
10982 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10983 break;
10984 }
10985 vl_arb_set_cache(vlc, t);
10986 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10987 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10988 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10989 break;
10990 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010991 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010992 break;
10993 case FM_TBL_SC2VLNT:
10994 set_sc2vlnt(ppd->dd, t);
10995 break;
10996 default:
10997 ret = -EINVAL;
10998 }
10999 return ret;
11000}
11001
11002/*
11003 * Disable all data VLs.
11004 *
11005 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11006 */
11007static int disable_data_vls(struct hfi1_devdata *dd)
11008{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011009 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011010 return 1;
11011
11012 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11013
11014 return 0;
11015}
11016
11017/*
11018 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11019 * Just re-enables all data VLs (the "fill" part happens
11020 * automatically - the name was chosen for symmetry with
11021 * stop_drain_data_vls()).
11022 *
11023 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11024 */
11025int open_fill_data_vls(struct hfi1_devdata *dd)
11026{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011027 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011028 return 1;
11029
11030 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11031
11032 return 0;
11033}
11034
11035/*
11036 * drain_data_vls() - assumes that disable_data_vls() has been called;
11037 * waits for the occupancy of the per-VL FIFOs (for all contexts) and the
11038 * SDMA engines to drop to 0.
11039 */
11040static void drain_data_vls(struct hfi1_devdata *dd)
11041{
11042 sc_wait(dd);
11043 sdma_wait(dd);
11044 pause_for_credit_return(dd);
11045}
11046
11047/*
11048 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11049 *
11050 * Use open_fill_data_vls() to resume using data VLs. This pair is
11051 * meant to be used like this:
11052 *
11053 * stop_drain_data_vls(dd);
11054 * // do things with per-VL resources
11055 * open_fill_data_vls(dd);
11056 */
11057int stop_drain_data_vls(struct hfi1_devdata *dd)
11058{
11059 int ret;
11060
11061 ret = disable_data_vls(dd);
11062 if (ret == 0)
11063 drain_data_vls(dd);
11064
11065 return ret;
11066}
11067
11068/*
11069 * Convert a nanosecond time to a cclock count. No matter how slow
11070 * the cclock, a non-zero ns will always have a non-zero result.
11071 */
11072u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11073{
11074 u32 cclocks;
11075
11076 if (dd->icode == ICODE_FPGA_EMULATION)
11077 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11078 else /* simulation pretends to be ASIC */
11079 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11080 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11081 cclocks = 1;
11082 return cclocks;
11083}
11084
11085/*
11086 * Convert a cclock count to nanoseconds. No matter how slow
11087 * the cclock, a non-zero cclock count will always have a non-zero result.
11088 */
11089u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11090{
11091 u32 ns;
11092
11093 if (dd->icode == ICODE_FPGA_EMULATION)
11094 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11095 else /* simulation pretends to be ASIC */
11096 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11097 if (cclocks && !ns)
11098 ns = 1;
11099 return ns;
11100}
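
/*
 * Illustrative round trip (assumed cclock period, not a hardware value):
 * with a period of 800 ps, ns_to_cclock(dd, 2) = (2 * 1000) / 800 = 2
 * cclocks and cclock_to_ns(dd, 2) = (2 * 800) / 1000 = 1 ns.  The clamps
 * above guarantee that a non-zero input never rounds down to zero.
 */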
11101
11102/*
11103 * Dynamically adjust the receive interrupt timeout for a context based on
11104 * incoming packet rate.
11105 *
11106 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11107 */
11108static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11109{
11110 struct hfi1_devdata *dd = rcd->dd;
11111 u32 timeout = rcd->rcvavail_timeout;
11112
11113 /*
11114	 * This algorithm doubles or halves the timeout depending on whether
11115	 * the number of packets received in this interrupt was less than or
11116	 * greater than or equal to the interrupt count.
11117 *
11118 * The calculations below do not allow a steady state to be achieved.
11119	 * Only at the endpoints is it possible to have an unchanging
11120 * timeout.
11121 */
11122 if (npkts < rcv_intr_count) {
11123 /*
11124 * Not enough packets arrived before the timeout, adjust
11125 * timeout downward.
11126 */
11127 if (timeout < 2) /* already at minimum? */
11128 return;
11129 timeout >>= 1;
11130 } else {
11131 /*
11132 * More than enough packets arrived before the timeout, adjust
11133 * timeout upward.
11134 */
11135 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11136 return;
11137 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11138 }
11139
11140 rcd->rcvavail_timeout = timeout;
11141 /* timeout cannot be larger than rcv_intr_timeout_csr which has already
11142 been verified to be in range */
11143 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11144 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11145}
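
/*
 * Illustrative behavior (assumed numbers, not from the driver): with
 * rcv_intr_count = 16 and a current timeout of 8, an interrupt that saw
 * only 10 packets halves the timeout to 4, while one that saw 20 packets
 * doubles it to 16 (capped at dd->rcv_intr_timeout_csr).
 */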
11146
11147void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11148 u32 intr_adjust, u32 npkts)
11149{
11150 struct hfi1_devdata *dd = rcd->dd;
11151 u64 reg;
11152 u32 ctxt = rcd->ctxt;
11153
11154 /*
11155 * Need to write timeout register before updating RcvHdrHead to ensure
11156 * that a new value is used when the HW decides to restart counting.
11157 */
11158 if (intr_adjust)
11159 adjust_rcv_timeout(rcd, npkts);
11160 if (updegr) {
11161 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11162 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11163 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11164 }
11165 mmiowb();
11166 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11167 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11168 << RCV_HDR_HEAD_HEAD_SHIFT);
11169 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11170 mmiowb();
11171}
11172
11173u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11174{
11175 u32 head, tail;
11176
11177 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11178 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11179
11180 if (rcd->rcvhdrtail_kvaddr)
11181 tail = get_rcvhdrtail(rcd);
11182 else
11183 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11184
11185 return head == tail;
11186}
11187
11188/*
11189 * Context Control and Receive Array encoding for buffer size:
11190 * 0x0 invalid
11191 * 0x1 4 KB
11192 * 0x2 8 KB
11193 * 0x3 16 KB
11194 * 0x4 32 KB
11195 * 0x5 64 KB
11196 * 0x6 128 KB
11197 * 0x7 256 KB
11198 * 0x8 512 KB (Receive Array only)
11199 * 0x9 1 MB (Receive Array only)
11200 * 0xa 2 MB (Receive Array only)
11201 *
11202 * 0xB-0xF - reserved (Receive Array only)
11203 *
11204 *
11205 * This routine assumes that the value has already been sanity checked.
11206 */
11207static u32 encoded_size(u32 size)
11208{
11209 switch (size) {
11210 case 4*1024: return 0x1;
11211 case 8*1024: return 0x2;
11212 case 16*1024: return 0x3;
11213 case 32*1024: return 0x4;
11214 case 64*1024: return 0x5;
11215 case 128*1024: return 0x6;
11216 case 256*1024: return 0x7;
11217 case 512*1024: return 0x8;
11218 case 1*1024*1024: return 0x9;
11219 case 2*1024*1024: return 0xa;
11220 }
11221 return 0x1; /* if invalid, go with the minimum size */
11222}
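
/*
 * Illustrative usage (values from the encoding table above):
 * encoded_size(64 * 1024) returns 0x5; an unsupported size such as 3000
 * falls out of the switch and returns 0x1 (4 KB).
 */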
11223
11224void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11225{
11226 struct hfi1_ctxtdata *rcd;
11227 u64 rcvctrl, reg;
11228 int did_enable = 0;
11229
11230 rcd = dd->rcd[ctxt];
11231 if (!rcd)
11232 return;
11233
11234 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11235
11236 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11237	/* if the context is already enabled, don't do the extra steps */
11238 if ((op & HFI1_RCVCTRL_CTXT_ENB)
11239 && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11240 /* reset the tail and hdr addresses, and sequence count */
11241 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11242 rcd->rcvhdrq_phys);
11243 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11244 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11245 rcd->rcvhdrqtailaddr_phys);
11246 rcd->seq_cnt = 1;
11247
11248 /* reset the cached receive header queue head value */
11249 rcd->head = 0;
11250
11251 /*
11252 * Zero the receive header queue so we don't get false
11253 * positives when checking the sequence number. The
11254 * sequence numbers could land exactly on the same spot.
11255		 * E.g. a rcd restart before the receive header queue wrapped.
11256 */
11257 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11258
11259 /* starting timeout */
11260 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11261
11262 /* enable the context */
11263 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11264
11265 /* clean the egr buffer size first */
11266 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11267 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11268 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11269 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11270
11271 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11272 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11273 did_enable = 1;
11274
11275 /* zero RcvEgrIndexHead */
11276 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11277
11278 /* set eager count and base index */
11279 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11280 & RCV_EGR_CTRL_EGR_CNT_MASK)
11281 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11282 (((rcd->eager_base >> RCV_SHIFT)
11283 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11284 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11285 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11286
11287 /*
11288 * Set TID (expected) count and base index.
11289 * rcd->expected_count is set to individual RcvArray entries,
11290 * not pairs, and the CSR takes a pair-count in groups of
11291 * four, so divide by 8.
11292 */
11293 reg = (((rcd->expected_count >> RCV_SHIFT)
11294 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11295 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11296 (((rcd->expected_base >> RCV_SHIFT)
11297 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11298 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11299 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011300 if (ctxt == HFI1_CTRL_CTXT)
11301 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011302 }
11303 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11304 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011305 /*
11306		 * When the receive context is being disabled, turn on the tail
11307		 * update with a dummy tail address and then disable the
11308		 * receive context.
11309 */
11310 if (dd->rcvhdrtail_dummy_physaddr) {
11311 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11312 dd->rcvhdrtail_dummy_physaddr);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011313 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011314 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11315 }
11316
Mike Marciniszyn77241052015-07-30 15:17:43 -040011317 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11318 }
11319 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11320 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11321 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11322 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11323 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11324 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011325 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11326 /* See comment on RcvCtxtCtrl.TailUpd above */
11327 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11328 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11329 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011330 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11331 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11332 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11333 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11334 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11335 /* In one-packet-per-eager mode, the size comes from
11336 the RcvArray entry. */
11337 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11338 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11339 }
11340 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11341 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11342 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11343 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11344 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11345 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11346 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11347 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11348 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11349 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11350 rcd->rcvctrl = rcvctrl;
11351 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11352 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11353
11354 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11355 if (did_enable
11356 && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11357 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11358 if (reg != 0) {
11359 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11360 ctxt, reg);
11361 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11362 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11363 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11364 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11365 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11366 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11367 ctxt, reg, reg == 0 ? "not" : "still");
11368 }
11369 }
11370
11371 if (did_enable) {
11372 /*
11373 * The interrupt timeout and count must be set after
11374 * the context is enabled to take effect.
11375 */
11376 /* set interrupt timeout */
11377 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11378 (u64)rcd->rcvavail_timeout <<
11379 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11380
11381 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11382 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11383 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11384 }
11385
11386 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11387 /*
11388 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011389 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
11390 * so it doesn't contain an address that is invalid.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011391 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011392 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11393 dd->rcvhdrtail_dummy_physaddr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011394}
11395
11396u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11397 u64 **cntrp)
11398{
11399 int ret;
11400 u64 val = 0;
11401
11402 if (namep) {
11403 ret = dd->cntrnameslen;
11404 if (pos != 0) {
11405 dd_dev_err(dd, "read_cntrs does not support indexing");
11406 return 0;
11407 }
11408 *namep = dd->cntrnames;
11409 } else {
11410 const struct cntr_entry *entry;
11411 int i, j;
11412
11413 ret = (dd->ndevcntrs) * sizeof(u64);
11414 if (pos != 0) {
11415 dd_dev_err(dd, "read_cntrs does not support indexing");
11416 return 0;
11417 }
11418
11419 /* Get the start of the block of counters */
11420 *cntrp = dd->cntrs;
11421
11422 /*
11423 * Now go and fill in each counter in the block.
11424 */
11425 for (i = 0; i < DEV_CNTR_LAST; i++) {
11426 entry = &dev_cntrs[i];
11427 hfi1_cdbg(CNTR, "reading %s", entry->name);
11428 if (entry->flags & CNTR_DISABLED) {
11429 /* Nothing */
11430 hfi1_cdbg(CNTR, "\tDisabled\n");
11431 } else {
11432 if (entry->flags & CNTR_VL) {
11433 hfi1_cdbg(CNTR, "\tPer VL\n");
11434 for (j = 0; j < C_VL_COUNT; j++) {
11435 val = entry->rw_cntr(entry,
11436 dd, j,
11437 CNTR_MODE_R,
11438 0);
11439 hfi1_cdbg(
11440 CNTR,
11441 "\t\tRead 0x%llx for %d\n",
11442 val, j);
11443 dd->cntrs[entry->offset + j] =
11444 val;
11445 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011446 } else if (entry->flags & CNTR_SDMA) {
11447 hfi1_cdbg(CNTR,
11448 "\t Per SDMA Engine\n");
11449 for (j = 0; j < dd->chip_sdma_engines;
11450 j++) {
11451 val =
11452 entry->rw_cntr(entry, dd, j,
11453 CNTR_MODE_R, 0);
11454 hfi1_cdbg(CNTR,
11455 "\t\tRead 0x%llx for %d\n",
11456 val, j);
11457 dd->cntrs[entry->offset + j] =
11458 val;
11459 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011460 } else {
11461 val = entry->rw_cntr(entry, dd,
11462 CNTR_INVALID_VL,
11463 CNTR_MODE_R, 0);
11464 dd->cntrs[entry->offset] = val;
11465 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11466 }
11467 }
11468 }
11469 }
11470 return ret;
11471}
11472
11473/*
11474 * Used by sysfs to create files for hfi stats to read
11475 */
11476u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11477 char **namep, u64 **cntrp)
11478{
11479 int ret;
11480 u64 val = 0;
11481
11482 if (namep) {
11483 ret = dd->portcntrnameslen;
11484 if (pos != 0) {
11485 dd_dev_err(dd, "index not supported");
11486 return 0;
11487 }
11488 *namep = dd->portcntrnames;
11489 } else {
11490 const struct cntr_entry *entry;
11491 struct hfi1_pportdata *ppd;
11492 int i, j;
11493
11494 ret = (dd->nportcntrs) * sizeof(u64);
11495 if (pos != 0) {
11496 dd_dev_err(dd, "indexing not supported");
11497 return 0;
11498 }
11499 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11500 *cntrp = ppd->cntrs;
11501
11502 for (i = 0; i < PORT_CNTR_LAST; i++) {
11503 entry = &port_cntrs[i];
11504 hfi1_cdbg(CNTR, "reading %s", entry->name);
11505 if (entry->flags & CNTR_DISABLED) {
11506 /* Nothing */
11507 hfi1_cdbg(CNTR, "\tDisabled\n");
11508 continue;
11509 }
11510
11511 if (entry->flags & CNTR_VL) {
11512 hfi1_cdbg(CNTR, "\tPer VL");
11513 for (j = 0; j < C_VL_COUNT; j++) {
11514 val = entry->rw_cntr(entry, ppd, j,
11515 CNTR_MODE_R,
11516 0);
11517 hfi1_cdbg(
11518 CNTR,
11519 "\t\tRead 0x%llx for %d",
11520 val, j);
11521 ppd->cntrs[entry->offset + j] = val;
11522 }
11523 } else {
11524 val = entry->rw_cntr(entry, ppd,
11525 CNTR_INVALID_VL,
11526 CNTR_MODE_R,
11527 0);
11528 ppd->cntrs[entry->offset] = val;
11529 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11530 }
11531 }
11532 }
11533 return ret;
11534}
11535
11536static void free_cntrs(struct hfi1_devdata *dd)
11537{
11538 struct hfi1_pportdata *ppd;
11539 int i;
11540
11541 if (dd->synth_stats_timer.data)
11542 del_timer_sync(&dd->synth_stats_timer);
11543 dd->synth_stats_timer.data = 0;
11544 ppd = (struct hfi1_pportdata *)(dd + 1);
11545 for (i = 0; i < dd->num_pports; i++, ppd++) {
11546 kfree(ppd->cntrs);
11547 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011548 free_percpu(ppd->ibport_data.rvp.rc_acks);
11549 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11550 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011551 ppd->cntrs = NULL;
11552 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011553 ppd->ibport_data.rvp.rc_acks = NULL;
11554 ppd->ibport_data.rvp.rc_qacks = NULL;
11555 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011556 }
11557 kfree(dd->portcntrnames);
11558 dd->portcntrnames = NULL;
11559 kfree(dd->cntrs);
11560 dd->cntrs = NULL;
11561 kfree(dd->scntrs);
11562 dd->scntrs = NULL;
11563 kfree(dd->cntrnames);
11564 dd->cntrnames = NULL;
11565}
11566
11567#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11568#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11569
11570static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11571 u64 *psval, void *context, int vl)
11572{
11573 u64 val;
11574 u64 sval = *psval;
11575
11576 if (entry->flags & CNTR_DISABLED) {
11577 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11578 return 0;
11579 }
11580
11581 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11582
11583 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11584
 11585 	/* If it's a synthetic counter there is more work we need to do */
11586 if (entry->flags & CNTR_SYNTH) {
11587 if (sval == CNTR_MAX) {
11588 /* No need to read already saturated */
11589 return CNTR_MAX;
11590 }
11591
11592 if (entry->flags & CNTR_32BIT) {
11593 /* 32bit counters can wrap multiple times */
11594 u64 upper = sval >> 32;
11595 u64 lower = (sval << 32) >> 32;
11596
11597 if (lower > val) { /* hw wrapped */
11598 if (upper == CNTR_32BIT_MAX)
11599 val = CNTR_MAX;
11600 else
11601 upper++;
11602 }
11603
11604 if (val != CNTR_MAX)
11605 val = (upper << 32) | val;
11606
11607 } else {
11608 /* If we rolled we are saturated */
11609 if ((val < sval) || (val > CNTR_MAX))
11610 val = CNTR_MAX;
11611 }
11612 }
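	/*
	 * Worked example (values are illustrative): with a saved value of
	 * sval = 0x0000000200000010 (upper = 2, lower = 0x10) and a new
	 * hardware read of val = 0x5, lower > val indicates another wrap,
	 * so upper becomes 3 and the synthesized 64-bit value is
	 * 0x0000000300000005.
	 */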
11613
11614 *psval = val;
11615
11616 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11617
11618 return val;
11619}
11620
11621static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11622 struct cntr_entry *entry,
11623 u64 *psval, void *context, int vl, u64 data)
11624{
11625 u64 val;
11626
11627 if (entry->flags & CNTR_DISABLED) {
11628 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11629 return 0;
11630 }
11631
11632 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11633
11634 if (entry->flags & CNTR_SYNTH) {
11635 *psval = data;
11636 if (entry->flags & CNTR_32BIT) {
11637 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11638 (data << 32) >> 32);
11639 val = data; /* return the full 64bit value */
11640 } else {
11641 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11642 data);
11643 }
11644 } else {
11645 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11646 }
11647
11648 *psval = val;
11649
11650 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11651
11652 return val;
11653}
11654
11655u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11656{
11657 struct cntr_entry *entry;
11658 u64 *sval;
11659
11660 entry = &dev_cntrs[index];
11661 sval = dd->scntrs + entry->offset;
11662
11663 if (vl != CNTR_INVALID_VL)
11664 sval += vl;
11665
11666 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11667}
11668
11669u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11670{
11671 struct cntr_entry *entry;
11672 u64 *sval;
11673
11674 entry = &dev_cntrs[index];
11675 sval = dd->scntrs + entry->offset;
11676
11677 if (vl != CNTR_INVALID_VL)
11678 sval += vl;
11679
11680 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11681}
11682
11683u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11684{
11685 struct cntr_entry *entry;
11686 u64 *sval;
11687
11688 entry = &port_cntrs[index];
11689 sval = ppd->scntrs + entry->offset;
11690
11691 if (vl != CNTR_INVALID_VL)
11692 sval += vl;
11693
11694 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11695 (index <= C_RCV_HDR_OVF_LAST)) {
11696 /* We do not want to bother for disabled contexts */
11697 return 0;
11698 }
11699
11700 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11701}
11702
11703u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11704{
11705 struct cntr_entry *entry;
11706 u64 *sval;
11707
11708 entry = &port_cntrs[index];
11709 sval = ppd->scntrs + entry->offset;
11710
11711 if (vl != CNTR_INVALID_VL)
11712 sval += vl;
11713
11714 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11715 (index <= C_RCV_HDR_OVF_LAST)) {
11716 /* We do not want to bother for disabled contexts */
11717 return 0;
11718 }
11719
11720 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11721}
11722
11723static void update_synth_timer(unsigned long opaque)
11724{
11725 u64 cur_tx;
11726 u64 cur_rx;
11727 u64 total_flits;
11728 u8 update = 0;
11729 int i, j, vl;
11730 struct hfi1_pportdata *ppd;
11731 struct cntr_entry *entry;
11732
11733 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11734
11735 /*
11736 * Rather than keep beating on the CSRs pick a minimal set that we can
11737 * check to watch for potential roll over. We can do this by looking at
 11738	 * the number of flits sent/recv. If the total flits exceed 32 bits then
11739 * we have to iterate all the counters and update.
11740 */
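	/*
	 * Example (numbers are illustrative): if the combined tx + rx flit
	 * delta since the last tick reaches 2^32, a 32-bit synthetic
	 * counter may have wrapped unnoticed, so every counter is re-read
	 * below to fold any wrap into its 64-bit software value.
	 */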
11741 entry = &dev_cntrs[C_DC_RCV_FLITS];
11742 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11743
11744 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11745 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11746
11747 hfi1_cdbg(
11748 CNTR,
11749 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11750 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11751
11752 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11753 /*
11754 * May not be strictly necessary to update but it won't hurt and
11755 * simplifies the logic here.
11756 */
11757 update = 1;
11758 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11759 dd->unit);
11760 } else {
11761 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11762 hfi1_cdbg(CNTR,
11763 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11764 total_flits, (u64)CNTR_32BIT_MAX);
11765 if (total_flits >= CNTR_32BIT_MAX) {
11766 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11767 dd->unit);
11768 update = 1;
11769 }
11770 }
11771
11772 if (update) {
11773 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11774 for (i = 0; i < DEV_CNTR_LAST; i++) {
11775 entry = &dev_cntrs[i];
11776 if (entry->flags & CNTR_VL) {
11777 for (vl = 0; vl < C_VL_COUNT; vl++)
11778 read_dev_cntr(dd, i, vl);
11779 } else {
11780 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11781 }
11782 }
11783 ppd = (struct hfi1_pportdata *)(dd + 1);
11784 for (i = 0; i < dd->num_pports; i++, ppd++) {
11785 for (j = 0; j < PORT_CNTR_LAST; j++) {
11786 entry = &port_cntrs[j];
11787 if (entry->flags & CNTR_VL) {
11788 for (vl = 0; vl < C_VL_COUNT; vl++)
11789 read_port_cntr(ppd, j, vl);
11790 } else {
11791 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11792 }
11793 }
11794 }
11795
11796 /*
11797 * We want the value in the register. The goal is to keep track
11798 * of the number of "ticks" not the counter value. In other
11799 * words if the register rolls we want to notice it and go ahead
11800 * and force an update.
11801 */
11802 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11803 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11804 CNTR_MODE_R, 0);
11805
11806 entry = &dev_cntrs[C_DC_RCV_FLITS];
11807 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11808 CNTR_MODE_R, 0);
11809
11810 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11811 dd->unit, dd->last_tx, dd->last_rx);
11812
11813 } else {
11814 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11815 }
11816
 11817	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11818}
11819
 11820 #define C_MAX_NAME 13 /* 12 chars + one for '\0' */
11821static int init_cntrs(struct hfi1_devdata *dd)
11822{
Dean Luickc024c552016-01-11 18:30:57 -050011823 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011824 size_t sz;
11825 char *p;
11826 char name[C_MAX_NAME];
11827 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011828 const char *bit_type_32 = ",32";
11829 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011830
11831 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011832 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11833 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011834
11835 /***********************/
11836 /* per device counters */
11837 /***********************/
11838
 11839 	/* size names and determine how many we have */
11840 dd->ndevcntrs = 0;
11841 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011842
11843 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011844 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11845 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11846 continue;
11847 }
11848
11849 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050011850 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011851 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011852 snprintf(name, C_MAX_NAME, "%s%d",
11853 dev_cntrs[i].name,
11854 vl_from_idx(j));
11855 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011856 /* Add ",32" for 32-bit counters */
11857 if (dev_cntrs[i].flags & CNTR_32BIT)
11858 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011859 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011860 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011861 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011862 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050011863 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011864 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011865 snprintf(name, C_MAX_NAME, "%s%d",
11866 dev_cntrs[i].name, j);
11867 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011868 /* Add ",32" for 32-bit counters */
11869 if (dev_cntrs[i].flags & CNTR_32BIT)
11870 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011871 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011872 dd->ndevcntrs++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011873 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011874 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011875 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011876 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011877 /* Add ",32" for 32-bit counters */
11878 if (dev_cntrs[i].flags & CNTR_32BIT)
11879 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050011880 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011881 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011882 }
11883 }
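	/*
	 * Resulting layout (counter names here are hypothetical): the flat
	 * name buffer built below looks like "TxFlits\nRxFlits,32\n..."
	 * with ",32" marking 32-bit counters and per-VL/per-SDMA counters
	 * carrying a numeric suffix, one '\n'-terminated entry per value.
	 */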
11884
11885 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011886 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011887 if (!dd->cntrs)
11888 goto bail;
11889
Dean Luickc024c552016-01-11 18:30:57 -050011890 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011891 if (!dd->scntrs)
11892 goto bail;
11893
11894
11895 /* allocate space for the counter names */
11896 dd->cntrnameslen = sz;
11897 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11898 if (!dd->cntrnames)
11899 goto bail;
11900
11901 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050011902 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011903 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11904 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011905 } else if (dev_cntrs[i].flags & CNTR_VL) {
11906 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011907 snprintf(name, C_MAX_NAME, "%s%d",
11908 dev_cntrs[i].name,
11909 vl_from_idx(j));
11910 memcpy(p, name, strlen(name));
11911 p += strlen(name);
11912
11913 /* Counter is 32 bits */
11914 if (dev_cntrs[i].flags & CNTR_32BIT) {
11915 memcpy(p, bit_type_32, bit_type_32_sz);
11916 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011917 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011918
Mike Marciniszyn77241052015-07-30 15:17:43 -040011919 *p++ = '\n';
11920 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011921 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11922 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011923 snprintf(name, C_MAX_NAME, "%s%d",
11924 dev_cntrs[i].name, j);
11925 memcpy(p, name, strlen(name));
11926 p += strlen(name);
11927
11928 /* Counter is 32 bits */
11929 if (dev_cntrs[i].flags & CNTR_32BIT) {
11930 memcpy(p, bit_type_32, bit_type_32_sz);
11931 p += bit_type_32_sz;
11932 }
11933
11934 *p++ = '\n';
11935 }
11936 } else {
11937 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
11938 p += strlen(dev_cntrs[i].name);
11939
11940 /* Counter is 32 bits */
11941 if (dev_cntrs[i].flags & CNTR_32BIT) {
11942 memcpy(p, bit_type_32, bit_type_32_sz);
11943 p += bit_type_32_sz;
11944 }
11945
11946 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040011947 }
11948 }
11949
11950 /*********************/
11951 /* per port counters */
11952 /*********************/
11953
11954 /*
11955 * Go through the counters for the overflows and disable the ones we
11956 * don't need. This varies based on platform so we need to do it
11957 * dynamically here.
11958 */
11959 rcv_ctxts = dd->num_rcv_contexts;
11960 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11961 i <= C_RCV_HDR_OVF_LAST; i++) {
11962 port_cntrs[i].flags |= CNTR_DISABLED;
11963 }
11964
 11965 	/* size port counter names and determine how many we have */
11966 sz = 0;
11967 dd->nportcntrs = 0;
11968 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011969 if (port_cntrs[i].flags & CNTR_DISABLED) {
11970 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11971 continue;
11972 }
11973
11974 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011975 port_cntrs[i].offset = dd->nportcntrs;
11976 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011977 snprintf(name, C_MAX_NAME, "%s%d",
11978 port_cntrs[i].name,
11979 vl_from_idx(j));
11980 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011981 /* Add ",32" for 32-bit counters */
11982 if (port_cntrs[i].flags & CNTR_32BIT)
11983 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011984 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011985 dd->nportcntrs++;
11986 }
11987 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011988 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011989 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011990 /* Add ",32" for 32-bit counters */
11991 if (port_cntrs[i].flags & CNTR_32BIT)
11992 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011993 port_cntrs[i].offset = dd->nportcntrs;
11994 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011995 }
11996 }
11997
11998 /* allocate space for the counter names */
11999 dd->portcntrnameslen = sz;
12000 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12001 if (!dd->portcntrnames)
12002 goto bail;
12003
12004 /* fill in port cntr names */
12005 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12006 if (port_cntrs[i].flags & CNTR_DISABLED)
12007 continue;
12008
12009 if (port_cntrs[i].flags & CNTR_VL) {
12010 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012011 snprintf(name, C_MAX_NAME, "%s%d",
12012 port_cntrs[i].name,
12013 vl_from_idx(j));
12014 memcpy(p, name, strlen(name));
12015 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012016
12017 /* Counter is 32 bits */
12018 if (port_cntrs[i].flags & CNTR_32BIT) {
12019 memcpy(p, bit_type_32, bit_type_32_sz);
12020 p += bit_type_32_sz;
12021 }
12022
Mike Marciniszyn77241052015-07-30 15:17:43 -040012023 *p++ = '\n';
12024 }
12025 } else {
12026 memcpy(p, port_cntrs[i].name,
12027 strlen(port_cntrs[i].name));
12028 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012029
12030 /* Counter is 32 bits */
12031 if (port_cntrs[i].flags & CNTR_32BIT) {
12032 memcpy(p, bit_type_32, bit_type_32_sz);
12033 p += bit_type_32_sz;
12034 }
12035
Mike Marciniszyn77241052015-07-30 15:17:43 -040012036 *p++ = '\n';
12037 }
12038 }
12039
12040 /* allocate per port storage for counter values */
12041 ppd = (struct hfi1_pportdata *)(dd + 1);
12042 for (i = 0; i < dd->num_pports; i++, ppd++) {
12043 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12044 if (!ppd->cntrs)
12045 goto bail;
12046
12047 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12048 if (!ppd->scntrs)
12049 goto bail;
12050 }
12051
12052 /* CPU counters need to be allocated and zeroed */
12053 if (init_cpu_counters(dd))
12054 goto bail;
12055
12056 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12057 return 0;
12058bail:
12059 free_cntrs(dd);
12060 return -ENOMEM;
12061}
12062
12063
12064static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12065{
12066 switch (chip_lstate) {
12067 default:
12068 dd_dev_err(dd,
12069 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12070 chip_lstate);
12071 /* fall through */
12072 case LSTATE_DOWN:
12073 return IB_PORT_DOWN;
12074 case LSTATE_INIT:
12075 return IB_PORT_INIT;
12076 case LSTATE_ARMED:
12077 return IB_PORT_ARMED;
12078 case LSTATE_ACTIVE:
12079 return IB_PORT_ACTIVE;
12080 }
12081}
12082
12083u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12084{
12085 /* look at the HFI meta-states only */
12086 switch (chip_pstate & 0xf0) {
12087 default:
12088 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12089 chip_pstate);
12090 /* fall through */
12091 case PLS_DISABLED:
12092 return IB_PORTPHYSSTATE_DISABLED;
12093 case PLS_OFFLINE:
12094 return OPA_PORTPHYSSTATE_OFFLINE;
12095 case PLS_POLLING:
12096 return IB_PORTPHYSSTATE_POLLING;
12097 case PLS_CONFIGPHY:
12098 return IB_PORTPHYSSTATE_TRAINING;
12099 case PLS_LINKUP:
12100 return IB_PORTPHYSSTATE_LINKUP;
12101 case PLS_PHYTEST:
12102 return IB_PORTPHYSSTATE_PHY_TEST;
12103 }
12104}
12105
12106/* return the OPA port logical state name */
12107const char *opa_lstate_name(u32 lstate)
12108{
12109 static const char * const port_logical_names[] = {
12110 "PORT_NOP",
12111 "PORT_DOWN",
12112 "PORT_INIT",
12113 "PORT_ARMED",
12114 "PORT_ACTIVE",
12115 "PORT_ACTIVE_DEFER",
12116 };
12117 if (lstate < ARRAY_SIZE(port_logical_names))
12118 return port_logical_names[lstate];
12119 return "unknown";
12120}
12121
12122/* return the OPA port physical state name */
12123const char *opa_pstate_name(u32 pstate)
12124{
12125 static const char * const port_physical_names[] = {
12126 "PHYS_NOP",
12127 "reserved1",
12128 "PHYS_POLL",
12129 "PHYS_DISABLED",
12130 "PHYS_TRAINING",
12131 "PHYS_LINKUP",
12132 "PHYS_LINK_ERR_RECOVER",
12133 "PHYS_PHY_TEST",
12134 "reserved8",
12135 "PHYS_OFFLINE",
12136 "PHYS_GANGED",
12137 "PHYS_TEST",
12138 };
12139 if (pstate < ARRAY_SIZE(port_physical_names))
12140 return port_physical_names[pstate];
12141 return "unknown";
12142}
12143
12144/*
12145 * Read the hardware link state and set the driver's cached value of it.
12146 * Return the (new) current value.
12147 */
12148u32 get_logical_state(struct hfi1_pportdata *ppd)
12149{
12150 u32 new_state;
12151
12152 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12153 if (new_state != ppd->lstate) {
12154 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12155 opa_lstate_name(new_state), new_state);
12156 ppd->lstate = new_state;
12157 }
12158 /*
12159 * Set port status flags in the page mapped into userspace
12160 * memory. Do it here to ensure a reliable state - this is
12161 * the only function called by all state handling code.
 12162	 * Always set the flags because the cache value
12163 * might have been changed explicitly outside of this
12164 * function.
12165 */
12166 if (ppd->statusp) {
12167 switch (ppd->lstate) {
12168 case IB_PORT_DOWN:
12169 case IB_PORT_INIT:
12170 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12171 HFI1_STATUS_IB_READY);
12172 break;
12173 case IB_PORT_ARMED:
12174 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12175 break;
12176 case IB_PORT_ACTIVE:
12177 *ppd->statusp |= HFI1_STATUS_IB_READY;
12178 break;
12179 }
12180 }
12181 return ppd->lstate;
12182}
12183
12184/**
12185 * wait_logical_linkstate - wait for an IB link state change to occur
12186 * @ppd: port device
12187 * @state: the state to wait for
12188 * @msecs: the number of milliseconds to wait
12189 *
12190 * Wait up to msecs milliseconds for IB link state change to occur.
12191 * For now, take the easy polling route.
12192 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12193 */
12194static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12195 int msecs)
12196{
12197 unsigned long timeout;
12198
12199 timeout = jiffies + msecs_to_jiffies(msecs);
12200 while (1) {
12201 if (get_logical_state(ppd) == state)
12202 return 0;
12203 if (time_after(jiffies, timeout))
12204 break;
12205 msleep(20);
12206 }
12207 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12208
12209 return -ETIMEDOUT;
12210}
12211
12212u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12213{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012214 u32 pstate;
12215 u32 ib_pstate;
12216
12217 pstate = read_physical_state(ppd->dd);
12218 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012219 if (ppd->last_pstate != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012220 dd_dev_info(ppd->dd,
12221 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12222 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12223 pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012224 ppd->last_pstate = ib_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012225 }
12226 return ib_pstate;
12227}
12228
12229/*
12230 * Read/modify/write ASIC_QSFP register bits as selected by mask
12231 * data: 0 or 1 in the positions depending on what needs to be written
12232 * dir: 0 for read, 1 for write
12233 * mask: select by setting
12234 * I2CCLK (bit 0)
12235 * I2CDATA (bit 1)
12236 */
12237u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12238 u32 mask)
12239{
12240 u64 qsfp_oe, target_oe;
12241
12242 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12243 if (mask) {
12244 /* We are writing register bits, so lock access */
12245 dir &= mask;
12246 data &= mask;
12247
12248 qsfp_oe = read_csr(dd, target_oe);
12249 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12250 write_csr(dd, target_oe, qsfp_oe);
12251 }
12252 /* We are exclusively reading bits here, but it is unlikely
12253 * we'll get valid data when we set the direction of the pin
 12254	 * in the same call, so the caller should invoke this function again
12255 * to get valid data
12256 */
12257 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12258}
12259
12260#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12261(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12262
12263#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12264(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12265
12266int hfi1_init_ctxt(struct send_context *sc)
12267{
12268 if (sc != NULL) {
12269 struct hfi1_devdata *dd = sc->dd;
12270 u64 reg;
12271 u8 set = (sc->type == SC_USER ?
12272 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12273 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12274 reg = read_kctxt_csr(dd, sc->hw_context,
12275 SEND_CTXT_CHECK_ENABLE);
12276 if (set)
12277 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12278 else
12279 SET_STATIC_RATE_CONTROL_SMASK(reg);
12280 write_kctxt_csr(dd, sc->hw_context,
12281 SEND_CTXT_CHECK_ENABLE, reg);
12282 }
12283 return 0;
12284}
12285
12286int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12287{
12288 int ret = 0;
12289 u64 reg;
12290
12291 if (dd->icode != ICODE_RTL_SILICON) {
12292 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12293 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12294 __func__);
12295 return -EINVAL;
12296 }
12297 reg = read_csr(dd, ASIC_STS_THERM);
12298 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12299 ASIC_STS_THERM_CURR_TEMP_MASK);
12300 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12301 ASIC_STS_THERM_LO_TEMP_MASK);
12302 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12303 ASIC_STS_THERM_HI_TEMP_MASK);
12304 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12305 ASIC_STS_THERM_CRIT_TEMP_MASK);
12306 /* triggers is a 3-bit value - 1 bit per trigger. */
12307 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12308
12309 return ret;
12310}
12311
12312/* ========================================================================= */
12313
12314/*
12315 * Enable/disable chip from delivering interrupts.
12316 */
12317void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12318{
12319 int i;
12320
12321 /*
12322 * In HFI, the mask needs to be 1 to allow interrupts.
12323 */
12324 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012325 /* enable all interrupts */
12326 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12327 write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
12328
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012329 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012330 } else {
12331 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12332 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
12333 }
12334}
12335
12336/*
12337 * Clear all interrupt sources on the chip.
12338 */
12339static void clear_all_interrupts(struct hfi1_devdata *dd)
12340{
12341 int i;
12342
12343 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12344 write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
12345
12346 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12347 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12348 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12349 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12350 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12351 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12352 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12353 for (i = 0; i < dd->chip_send_contexts; i++)
12354 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12355 for (i = 0; i < dd->chip_sdma_engines; i++)
12356 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12357
12358 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12359 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12360 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12361}
12362
12363/* Move to pcie.c? */
12364static void disable_intx(struct pci_dev *pdev)
12365{
12366 pci_intx(pdev, 0);
12367}
12368
12369static void clean_up_interrupts(struct hfi1_devdata *dd)
12370{
12371 int i;
12372
12373 /* remove irqs - must happen before disabling/turning off */
12374 if (dd->num_msix_entries) {
12375 /* MSI-X */
12376 struct hfi1_msix_entry *me = dd->msix_entries;
12377
12378 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12379 if (me->arg == NULL) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012380 continue;
12381 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012382 free_irq(me->msix.vector, me->arg);
12383 }
12384 } else {
12385 /* INTx */
12386 if (dd->requested_intx_irq) {
12387 free_irq(dd->pcidev->irq, dd);
12388 dd->requested_intx_irq = 0;
12389 }
12390 }
12391
12392 /* turn off interrupts */
12393 if (dd->num_msix_entries) {
12394 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012395 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012396 } else {
12397 /* INTx */
12398 disable_intx(dd->pcidev);
12399 }
12400
12401 /* clean structures */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012402 kfree(dd->msix_entries);
12403 dd->msix_entries = NULL;
12404 dd->num_msix_entries = 0;
12405}
12406
12407/*
12408 * Remap the interrupt source from the general handler to the given MSI-X
12409 * interrupt.
12410 */
12411static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12412{
12413 u64 reg;
12414 int m, n;
12415
12416 /* clear from the handled mask of the general interrupt */
12417 m = isrc / 64;
12418 n = isrc % 64;
12419 dd->gi_mask[m] &= ~((u64)1 << n);
12420
12421 /* direct the chip source to the given MSI-X interrupt */
12422 m = isrc / 8;
12423 n = isrc % 8;
12424 reg = read_csr(dd, CCE_INT_MAP + (8*m));
12425 reg &= ~((u64)0xff << (8*n));
12426 reg |= ((u64)msix_intr & 0xff) << (8*n);
12427 write_csr(dd, CCE_INT_MAP + (8*m), reg);
12428}
12429
12430static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12431 int engine, int msix_intr)
12432{
12433 /*
12434 * SDMA engine interrupt sources grouped by type, rather than
12435 * engine. Per-engine interrupts are as follows:
12436 * SDMA
12437 * SDMAProgress
12438 * SDMAIdle
12439 */
12440 remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
12441 msix_intr);
12442 remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
12443 msix_intr);
12444 remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
12445 msix_intr);
12446}
12447
Mike Marciniszyn77241052015-07-30 15:17:43 -040012448static int request_intx_irq(struct hfi1_devdata *dd)
12449{
12450 int ret;
12451
Jubin John98050712015-11-16 21:59:27 -050012452 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12453 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012454 ret = request_irq(dd->pcidev->irq, general_interrupt,
12455 IRQF_SHARED, dd->intx_name, dd);
12456 if (ret)
12457 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12458 ret);
12459 else
12460 dd->requested_intx_irq = 1;
12461 return ret;
12462}
12463
12464static int request_msix_irqs(struct hfi1_devdata *dd)
12465{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012466 int first_general, last_general;
12467 int first_sdma, last_sdma;
12468 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012469 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012470
12471 /* calculate the ranges we are going to use */
12472 first_general = 0;
12473 first_sdma = last_general = first_general + 1;
12474 first_rx = last_sdma = first_sdma + dd->num_sdma;
12475 last_rx = first_rx + dd->n_krcv_queues;
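	/*
	 * Example layout (counts are illustrative): with 16 SDMA engines
	 * and 8 kernel receive queues, vector 0 is the general interrupt,
	 * vectors 1-16 serve SDMA and vectors 17-24 serve receive contexts.
	 */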
12476
12477 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012478 * Sanity check - the code expects all SDMA chip source
12479 * interrupts to be in the same CSR, starting at bit 0. Verify
12480 * that this is true by checking the bit location of the start.
12481 */
12482 BUILD_BUG_ON(IS_SDMA_START % 64);
12483
12484 for (i = 0; i < dd->num_msix_entries; i++) {
12485 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12486 const char *err_info;
12487 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012488 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012489 void *arg;
12490 int idx;
12491 struct hfi1_ctxtdata *rcd = NULL;
12492 struct sdma_engine *sde = NULL;
12493
12494 /* obtain the arguments to request_irq */
12495 if (first_general <= i && i < last_general) {
12496 idx = i - first_general;
12497 handler = general_interrupt;
12498 arg = dd;
12499 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012500 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012501 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012502 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012503 } else if (first_sdma <= i && i < last_sdma) {
12504 idx = i - first_sdma;
12505 sde = &dd->per_sdma[idx];
12506 handler = sdma_interrupt;
12507 arg = sde;
12508 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012509 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012510 err_info = "sdma";
12511 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012512 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012513 } else if (first_rx <= i && i < last_rx) {
12514 idx = i - first_rx;
12515 rcd = dd->rcd[idx];
12516 /* no interrupt if no rcd */
12517 if (!rcd)
12518 continue;
12519 /*
12520 * Set the interrupt register and mask for this
12521 * context's interrupt.
12522 */
12523 rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
12524 rcd->imask = ((u64)1) <<
12525 ((IS_RCVAVAIL_START+idx) % 64);
12526 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012527 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012528 arg = rcd;
12529 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012530 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012531 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012532 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012533 me->type = IRQ_RCVCTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012534 } else {
12535 /* not in our expected range - complain, then
12536 ignore it */
12537 dd_dev_err(dd,
12538 "Unexpected extra MSI-X interrupt %d\n", i);
12539 continue;
12540 }
12541 /* no argument, no interrupt */
12542 if (arg == NULL)
12543 continue;
12544 /* make sure the name is terminated */
12545 me->name[sizeof(me->name)-1] = 0;
12546
Dean Luickf4f30031c2015-10-26 10:28:44 -040012547 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12548 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012549 if (ret) {
12550 dd_dev_err(dd,
12551 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12552 err_info, me->msix.vector, idx, ret);
12553 return ret;
12554 }
12555 /*
12556 * assign arg after request_irq call, so it will be
12557 * cleaned up
12558 */
12559 me->arg = arg;
12560
Mitko Haralanov957558c2016-02-03 14:33:40 -080012561 ret = hfi1_get_irq_affinity(dd, me);
12562 if (ret)
12563 dd_dev_err(dd,
12564 "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012565 }
12566
Mike Marciniszyn77241052015-07-30 15:17:43 -040012567 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012568}
12569
12570/*
12571 * Set the general handler to accept all interrupts, remap all
12572 * chip interrupts back to MSI-X 0.
12573 */
12574static void reset_interrupts(struct hfi1_devdata *dd)
12575{
12576 int i;
12577
12578 /* all interrupts handled by the general handler */
12579 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12580 dd->gi_mask[i] = ~(u64)0;
12581
12582 /* all chip interrupts map to MSI-X 0 */
12583 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12584 write_csr(dd, CCE_INT_MAP + (8*i), 0);
12585}
12586
12587static int set_up_interrupts(struct hfi1_devdata *dd)
12588{
12589 struct hfi1_msix_entry *entries;
12590 u32 total, request;
12591 int i, ret;
12592 int single_interrupt = 0; /* we expect to have all the interrupts */
12593
12594 /*
12595 * Interrupt count:
12596 * 1 general, "slow path" interrupt (includes the SDMA engines
12597 * slow source, SDMACleanupDone)
12598 * N interrupts - one per used SDMA engine
12599 * M interrupt - one per kernel receive context
12600 */
12601 total = 1 + dd->num_sdma + dd->n_krcv_queues;
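	/*
	 * e.g. (illustrative counts): 16 SDMA engines plus 8 kernel receive
	 * contexts request 1 + 16 + 8 = 25 MSI-X vectors.
	 */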
12602
12603 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12604 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012605 ret = -ENOMEM;
12606 goto fail;
12607 }
12608 /* 1-1 MSI-X entry assignment */
12609 for (i = 0; i < total; i++)
12610 entries[i].msix.entry = i;
12611
12612 /* ask for MSI-X interrupts */
12613 request = total;
12614 request_msix(dd, &request, entries);
12615
12616 if (request == 0) {
12617 /* using INTx */
12618 /* dd->num_msix_entries already zero */
12619 kfree(entries);
12620 single_interrupt = 1;
12621 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12622 } else {
12623 /* using MSI-X */
12624 dd->num_msix_entries = request;
12625 dd->msix_entries = entries;
12626
12627 if (request != total) {
12628 /* using MSI-X, with reduced interrupts */
12629 dd_dev_err(
12630 dd,
12631 "cannot handle reduced interrupt case, want %u, got %u\n",
12632 total, request);
12633 ret = -EINVAL;
12634 goto fail;
12635 }
12636 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12637 }
12638
12639 /* mask all interrupts */
12640 set_intr_state(dd, 0);
12641 /* clear all pending interrupts */
12642 clear_all_interrupts(dd);
12643
12644 /* reset general handler mask, chip MSI-X mappings */
12645 reset_interrupts(dd);
12646
12647 if (single_interrupt)
12648 ret = request_intx_irq(dd);
12649 else
12650 ret = request_msix_irqs(dd);
12651 if (ret)
12652 goto fail;
12653
12654 return 0;
12655
12656fail:
12657 clean_up_interrupts(dd);
12658 return ret;
12659}
12660
12661/*
12662 * Set up context values in dd. Sets:
12663 *
12664 * num_rcv_contexts - number of contexts being used
12665 * n_krcv_queues - number of kernel contexts
12666 * first_user_ctxt - first non-kernel context in array of contexts
12667 * freectxts - number of free user contexts
12668 * num_send_contexts - number of PIO send contexts being used
12669 */
12670static int set_up_context_variables(struct hfi1_devdata *dd)
12671{
12672 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012673 int total_contexts;
12674 int ret;
12675 unsigned ngroups;
12676
12677 /*
12678 * Kernel contexts: (to be fixed later):
12679 * - min or 2 or 1 context/numa
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012680 * - Context 0 - control context (VL15/multicast/error)
12681 * - Context 1 - default context
Mike Marciniszyn77241052015-07-30 15:17:43 -040012682 */
12683 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012684 /*
12685 * Don't count context 0 in n_krcvqs since
 12686		 * it isn't used for normal verbs traffic.
12687 *
12688 * krcvqs will reflect number of kernel
12689 * receive contexts above 0.
12690 */
12691 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012692 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012693 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012694 num_kernel_contexts =
12695 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12696 /*
12697 * Every kernel receive context needs an ACK send context.
 12698	 * One send context is allocated for each VL{0-7} and VL15.
12699 */
12700 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12701 dd_dev_err(dd,
12702 "Reducing # kernel rcv contexts to: %d, from %d\n",
12703 (int)(dd->chip_send_contexts - num_vls - 1),
12704 (int)num_kernel_contexts);
12705 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12706 }
12707 /*
12708 * User contexts: (to be fixed later)
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012709 * - default to 1 user context per CPU if num_user_contexts is
12710 * negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012711 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012712 if (num_user_contexts < 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012713 num_user_contexts = num_online_cpus();
12714
12715 total_contexts = num_kernel_contexts + num_user_contexts;
12716
12717 /*
12718 * Adjust the counts given a global max.
12719 */
12720 if (total_contexts > dd->chip_rcv_contexts) {
12721 dd_dev_err(dd,
12722 "Reducing # user receive contexts to: %d, from %d\n",
12723 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12724 (int)num_user_contexts);
12725 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12726 /* recalculate */
12727 total_contexts = num_kernel_contexts + num_user_contexts;
12728 }
12729
12730 /* the first N are kernel contexts, the rest are user contexts */
12731 dd->num_rcv_contexts = total_contexts;
12732 dd->n_krcv_queues = num_kernel_contexts;
12733 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080012734 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012735 dd->freectxts = num_user_contexts;
12736 dd_dev_info(dd,
12737 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12738 (int)dd->chip_rcv_contexts,
12739 (int)dd->num_rcv_contexts,
12740 (int)dd->n_krcv_queues,
12741 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12742
12743 /*
12744 * Receive array allocation:
12745 * All RcvArray entries are divided into groups of 8. This
12746 * is required by the hardware and will speed up writes to
12747 * consecutive entries by using write-combining of the entire
12748 * cacheline.
12749 *
12750 * The number of groups are evenly divided among all contexts.
12751 * any left over groups will be given to the first N user
12752 * contexts.
12753 */
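	/*
	 * Worked example (entry count is illustrative): 8192 RcvArray
	 * entries in groups of 8 give 1024 groups; with 40 receive contexts
	 * each context gets 25 groups and the 24 left-over groups are
	 * handed to the first user contexts.
	 */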
12754 dd->rcv_entries.group_size = RCV_INCREMENT;
12755 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12756 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12757 dd->rcv_entries.nctxt_extra = ngroups -
12758 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12759 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12760 dd->rcv_entries.ngroups,
12761 dd->rcv_entries.nctxt_extra);
12762 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12763 MAX_EAGER_ENTRIES * 2) {
12764 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12765 dd->rcv_entries.group_size;
12766 dd_dev_info(dd,
12767 "RcvArray group count too high, change to %u\n",
12768 dd->rcv_entries.ngroups);
12769 dd->rcv_entries.nctxt_extra = 0;
12770 }
12771 /*
12772 * PIO send contexts
12773 */
12774 ret = init_sc_pools_and_sizes(dd);
12775 if (ret >= 0) { /* success */
12776 dd->num_send_contexts = ret;
12777 dd_dev_info(
12778 dd,
12779 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12780 dd->chip_send_contexts,
12781 dd->num_send_contexts,
12782 dd->sc_sizes[SC_KERNEL].count,
12783 dd->sc_sizes[SC_ACK].count,
12784 dd->sc_sizes[SC_USER].count);
12785 ret = 0; /* success */
12786 }
12787
12788 return ret;
12789}
12790
12791/*
12792 * Set the device/port partition key table. The MAD code
12793 * will ensure that, at least, the partial management
12794 * partition key is present in the table.
12795 */
12796static void set_partition_keys(struct hfi1_pportdata *ppd)
12797{
12798 struct hfi1_devdata *dd = ppd->dd;
12799 u64 reg = 0;
12800 int i;
12801
12802 dd_dev_info(dd, "Setting partition keys\n");
12803 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12804 reg |= (ppd->pkeys[i] &
12805 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12806 ((i % 4) *
12807 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12808 /* Each register holds 4 PKey values. */
12809 if ((i % 4) == 3) {
12810 write_csr(dd, RCV_PARTITION_KEY +
12811 ((i - 3) * 2), reg);
12812 reg = 0;
12813 }
12814 }
12815
12816 /* Always enable HW pkeys check when pkeys table is set */
12817 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12818}
12819
12820/*
12821 * These CSRs and memories are uninitialized on reset and must be
12822 * written before reading to set the ECC/parity bits.
12823 *
12824 * NOTE: All user context CSRs that are not mmaped write-only
12825 * (e.g. the TID flows) must be initialized even if the driver never
12826 * reads them.
12827 */
12828static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12829{
12830 int i, j;
12831
12832 /* CceIntMap */
12833 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12834 write_csr(dd, CCE_INT_MAP+(8*i), 0);
12835
12836 /* SendCtxtCreditReturnAddr */
12837 for (i = 0; i < dd->chip_send_contexts; i++)
12838 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12839
12840 /* PIO Send buffers */
12841 /* SDMA Send buffers */
12842 /* These are not normally read, and (presently) have no method
12843 to be read, so are not pre-initialized */
12844
12845 /* RcvHdrAddr */
12846 /* RcvHdrTailAddr */
12847 /* RcvTidFlowTable */
12848 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12849 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12850 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12851 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12852 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
12853 }
12854
12855 /* RcvArray */
12856 for (i = 0; i < dd->chip_rcv_array_count; i++)
12857 write_csr(dd, RCV_ARRAY + (8*i),
12858 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12859
12860 /* RcvQPMapTable */
12861 for (i = 0; i < 32; i++)
12862 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12863}
12864
12865/*
12866 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12867 */
12868static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12869 u64 ctrl_bits)
12870{
12871 unsigned long timeout;
12872 u64 reg;
12873
12874 /* is the condition present? */
12875 reg = read_csr(dd, CCE_STATUS);
12876 if ((reg & status_bits) == 0)
12877 return;
12878
12879 /* clear the condition */
12880 write_csr(dd, CCE_CTRL, ctrl_bits);
12881
12882 /* wait for the condition to clear */
12883 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12884 while (1) {
12885 reg = read_csr(dd, CCE_STATUS);
12886 if ((reg & status_bits) == 0)
12887 return;
12888 if (time_after(jiffies, timeout)) {
12889 dd_dev_err(dd,
12890 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12891 status_bits, reg & status_bits);
12892 return;
12893 }
12894 udelay(1);
12895 }
12896}
12897
12898/* set CCE CSRs to chip reset defaults */
12899static void reset_cce_csrs(struct hfi1_devdata *dd)
12900{
12901 int i;
12902
12903 /* CCE_REVISION read-only */
12904 /* CCE_REVISION2 read-only */
12905 /* CCE_CTRL - bits clear automatically */
12906 /* CCE_STATUS read-only, use CceCtrl to clear */
12907 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12908 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12909 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12910 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12911 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12912 /* CCE_ERR_STATUS read-only */
12913 write_csr(dd, CCE_ERR_MASK, 0);
12914 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12915 /* CCE_ERR_FORCE leave alone */
12916 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12917 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12918 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12919 /* CCE_PCIE_CTRL leave alone */
12920 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12921 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12922 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12923 CCE_MSIX_TABLE_UPPER_RESETCSR);
12924 }
12925 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12926 /* CCE_MSIX_PBA read-only */
12927 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12928 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12929 }
12930 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12931 write_csr(dd, CCE_INT_MAP, 0);
12932 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12933 /* CCE_INT_STATUS read-only */
12934 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12935 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12936 /* CCE_INT_FORCE leave alone */
12937 /* CCE_INT_BLOCKED read-only */
12938 }
12939 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12940 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12941}
12942
12943/* set ASIC CSRs to chip reset defaults */
12944static void reset_asic_csrs(struct hfi1_devdata *dd)
12945{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012946 int i;
12947
12948 /*
12949 * If the HFIs are shared between separate nodes or VMs,
12950 * then more will need to be done here. One idea is a module
12951 * parameter that returns early, letting the first power-on or
12952 * a known first load do the reset and blocking all others.
12953 */
12954
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012955 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12956 return;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012957
12958 if (dd->icode != ICODE_FPGA_EMULATION) {
12959 /* emulation does not have an SBus - leave these alone */
12960 /*
12961 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12962 * Notes:
12963 * o The reset is not zero if aimed at the core. See the
12964 * SBus documentation for details.
12965 * o If the SBus firmware has been updated (e.g. by the BIOS),
12966 * will the reset revert that?
12967 */
12968 /* ASIC_CFG_SBUS_REQUEST leave alone */
12969 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12970 }
12971 /* ASIC_SBUS_RESULT read-only */
12972 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12973 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12974 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12975 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012976
12977 /* We might want to retain this state across FLR if we ever use it */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012978 write_csr(dd, ASIC_CFG_DRV_STR, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012979
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050012980 /* ASIC_CFG_THERM_POLL_EN leave alone */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012981 /* ASIC_STS_THERM read-only */
12982 /* ASIC_CFG_RESET leave alone */
12983
12984 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12985 /* ASIC_PCIE_SD_HOST_STATUS read-only */
12986 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12987 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12988 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12989 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12990 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12991 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12992 for (i = 0; i < 16; i++)
12993 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12994
12995 /* ASIC_GPIO_IN read-only */
12996 write_csr(dd, ASIC_GPIO_OE, 0);
12997 write_csr(dd, ASIC_GPIO_INVERT, 0);
12998 write_csr(dd, ASIC_GPIO_OUT, 0);
12999 write_csr(dd, ASIC_GPIO_MASK, 0);
13000 /* ASIC_GPIO_STATUS read-only */
13001 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
13002 /* ASIC_GPIO_FORCE leave alone */
13003
13004 /* ASIC_QSFP1_IN read-only */
13005 write_csr(dd, ASIC_QSFP1_OE, 0);
13006 write_csr(dd, ASIC_QSFP1_INVERT, 0);
13007 write_csr(dd, ASIC_QSFP1_OUT, 0);
13008 write_csr(dd, ASIC_QSFP1_MASK, 0);
13009 /* ASIC_QSFP1_STATUS read-only */
13010 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
13011 /* ASIC_QSFP1_FORCE leave alone */
13012
13013 /* ASIC_QSFP2_IN read-only */
13014 write_csr(dd, ASIC_QSFP2_OE, 0);
13015 write_csr(dd, ASIC_QSFP2_INVERT, 0);
13016 write_csr(dd, ASIC_QSFP2_OUT, 0);
13017 write_csr(dd, ASIC_QSFP2_MASK, 0);
13018 /* ASIC_QSFP2_STATUS read-only */
13019 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
13020 /* ASIC_QSFP2_FORCE leave alone */
13021
13022 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
13023 /* this also writes a NOP command, clearing paging mode */
13024 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
13025 write_csr(dd, ASIC_EEP_DATA, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013026}
13027
13028/* set MISC CSRs to chip reset defaults */
13029static void reset_misc_csrs(struct hfi1_devdata *dd)
13030{
13031 int i;
13032
13033 for (i = 0; i < 32; i++) {
13034 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13035 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13036 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13037 }
13038 /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
 13039 	   only be written in 128-byte chunks */
13040 /* init RSA engine to clear lingering errors */
13041 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13042 write_csr(dd, MISC_CFG_RSA_MU, 0);
13043 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13044 /* MISC_STS_8051_DIGEST read-only */
13045 /* MISC_STS_SBM_DIGEST read-only */
13046 /* MISC_STS_PCIE_DIGEST read-only */
13047 /* MISC_STS_FAB_DIGEST read-only */
13048 /* MISC_ERR_STATUS read-only */
13049 write_csr(dd, MISC_ERR_MASK, 0);
13050 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13051 /* MISC_ERR_FORCE leave alone */
13052}
13053
13054/* set TXE CSRs to chip reset defaults */
13055static void reset_txe_csrs(struct hfi1_devdata *dd)
13056{
13057 int i;
13058
13059 /*
13060 * TXE Kernel CSRs
13061 */
13062 write_csr(dd, SEND_CTRL, 0);
13063 __cm_reset(dd, 0); /* reset CM internal state */
13064 /* SEND_CONTEXTS read-only */
13065 /* SEND_DMA_ENGINES read-only */
13066 /* SEND_PIO_MEM_SIZE read-only */
13067 /* SEND_DMA_MEM_SIZE read-only */
13068 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13069 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13070 /* SEND_PIO_ERR_STATUS read-only */
13071 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13072 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13073 /* SEND_PIO_ERR_FORCE leave alone */
13074 /* SEND_DMA_ERR_STATUS read-only */
13075 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13076 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13077 /* SEND_DMA_ERR_FORCE leave alone */
13078 /* SEND_EGRESS_ERR_STATUS read-only */
13079 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13080 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13081 /* SEND_EGRESS_ERR_FORCE leave alone */
13082 write_csr(dd, SEND_BTH_QP, 0);
13083 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13084 write_csr(dd, SEND_SC2VLT0, 0);
13085 write_csr(dd, SEND_SC2VLT1, 0);
13086 write_csr(dd, SEND_SC2VLT2, 0);
13087 write_csr(dd, SEND_SC2VLT3, 0);
13088 write_csr(dd, SEND_LEN_CHECK0, 0);
13089 write_csr(dd, SEND_LEN_CHECK1, 0);
13090 /* SEND_ERR_STATUS read-only */
13091 write_csr(dd, SEND_ERR_MASK, 0);
13092 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13093 /* SEND_ERR_FORCE read-only */
13094 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13095 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
13096 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13097 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
13098 for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
13099 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
13100 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13101 write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
13102 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13103 write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
13104 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13105 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
13106 SEND_CM_GLOBAL_CREDIT_RESETCSR);
13107 /* SEND_CM_CREDIT_USED_STATUS read-only */
13108 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13109 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13110 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13111 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13112 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13113 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13114 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
13115 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13116 /* SEND_CM_CREDIT_USED_VL read-only */
13117 /* SEND_CM_CREDIT_USED_VL15 read-only */
13118 /* SEND_EGRESS_CTXT_STATUS read-only */
13119 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13120 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13121 /* SEND_EGRESS_ERR_INFO read-only */
13122 /* SEND_EGRESS_ERR_SOURCE read-only */
13123
13124 /*
13125 * TXE Per-Context CSRs
13126 */
13127 for (i = 0; i < dd->chip_send_contexts; i++) {
13128 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13129 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13130 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13131 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13132 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13133 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13134 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13135 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13136 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13137 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13138 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13139 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13140 }
13141
13142 /*
13143 * TXE Per-SDMA CSRs
13144 */
13145 for (i = 0; i < dd->chip_sdma_engines; i++) {
13146 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13147 /* SEND_DMA_STATUS read-only */
13148 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13149 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13150 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13151 /* SEND_DMA_HEAD read-only */
13152 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13153 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13154 /* SEND_DMA_IDLE_CNT read-only */
13155 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13156 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13157 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13158 /* SEND_DMA_ENG_ERR_STATUS read-only */
13159 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13160 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13161 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13162 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13163 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13164 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13165 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13166 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13167 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13168 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13169 }
13170}
13171
13172/*
13173 * Expect on entry:
13174 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13175 */
13176static void init_rbufs(struct hfi1_devdata *dd)
13177{
13178 u64 reg;
13179 int count;
13180
13181 /*
13182 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13183 * clear.
13184 */
13185 count = 0;
13186 while (1) {
13187 reg = read_csr(dd, RCV_STATUS);
13188 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13189 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13190 break;
13191 /*
13192 * Give up after 1ms - maximum wait time.
13193 *
13194 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13195 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13196 * 148 KB / (66% * 250MB/s) = 920us
13197 */
13198 if (count++ > 500) {
13199 dd_dev_err(dd,
13200 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13201 __func__, reg);
13202 break;
13203 }
13204 udelay(2); /* do not busy-wait the CSR */
13205 }
13206
13207 /* start the init - expect RcvCtrl to be 0 */
13208 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13209
13210 /*
13211 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13212 * period after the write before RcvStatus.RxRbufInitDone is valid.
13213 * The delay in the first run through the loop below is sufficient and
13214 * required before the first read of RcvStatus.RxRbufInitDone.
13215 */
13216 read_csr(dd, RCV_CTRL);
13217
13218 /* wait for the init to finish */
13219 count = 0;
13220 while (1) {
13221 /* delay is required first time through - see above */
13222 udelay(2); /* do not busy-wait the CSR */
13223 reg = read_csr(dd, RCV_STATUS);
13224 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13225 break;
13226
13227 /* give up after 100us - slowest possible at 33MHz is 73us */
13228 if (count++ > 50) {
13229 dd_dev_err(dd,
13230 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13231 __func__);
13232 break;
13233 }
13234 }
13235}
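
/*
 * Illustrative sketch (not part of the driver): the worst-case drain time
 * quoted in the wait loop above, recomputed explicitly.  148 KiB of RBuf at
 * 66% of PCIe Gen1 x1 bandwidth (250 MB/s) takes roughly 920 us, which is
 * why the loop gives up after ~500 polls of udelay(2) (about 1 ms).  The
 * constants come from that comment, not from any chip CSR.
 */
static inline u32 example_rbuf_drain_us(void)
{
	u32 rbuf_bytes = 148 * 1024;		/* 148 KiB of receive buffer */
	u32 rate_bytes_per_us = 250 * 66 / 100;	/* 250 MB/s derated to 66% -> ~165 bytes/us */

	/* 151552 / 165 ~= 918 us, within the 1 ms poll budget */
	return rbuf_bytes / rate_bytes_per_us;
}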
13236
13237/* set RXE CSRs to chip reset defaults */
13238static void reset_rxe_csrs(struct hfi1_devdata *dd)
13239{
13240 int i, j;
13241
13242 /*
13243 * RXE Kernel CSRs
13244 */
13245 write_csr(dd, RCV_CTRL, 0);
13246 init_rbufs(dd);
13247 /* RCV_STATUS read-only */
13248 /* RCV_CONTEXTS read-only */
13249 /* RCV_ARRAY_CNT read-only */
13250 /* RCV_BUF_SIZE read-only */
13251 write_csr(dd, RCV_BTH_QP, 0);
13252 write_csr(dd, RCV_MULTICAST, 0);
13253 write_csr(dd, RCV_BYPASS, 0);
13254 write_csr(dd, RCV_VL15, 0);
13255 /* this is a clear-down */
13256 write_csr(dd, RCV_ERR_INFO,
13257 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13258 /* RCV_ERR_STATUS read-only */
13259 write_csr(dd, RCV_ERR_MASK, 0);
13260 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13261 /* RCV_ERR_FORCE leave alone */
13262 for (i = 0; i < 32; i++)
13263 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13264 for (i = 0; i < 4; i++)
13265 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13266 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13267 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13268 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13269 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13270 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13271 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13272 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13273 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13274 }
13275 for (i = 0; i < 32; i++)
13276 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13277
13278 /*
13279 * RXE Kernel and User Per-Context CSRs
13280 */
13281 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13282 /* kernel */
13283 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13284 /* RCV_CTXT_STATUS read-only */
13285 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13286 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13287 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13288 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13289 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13290 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13291 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13292 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13293 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13294 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13295
13296 /* user */
13297 /* RCV_HDR_TAIL read-only */
13298 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13299 /* RCV_EGR_INDEX_TAIL read-only */
13300 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13301 /* RCV_EGR_OFFSET_TAIL read-only */
13302 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13303 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13304 0);
13305 }
13306 }
13307}
13308
13309/*
13310 * Set sc2vl tables.
13311 *
13312 * They power on to zeros, so to avoid send context errors
13313 * they need to be set:
13314 *
13315 * SC 0-7 -> VL 0-7 (respectively)
13316 * SC 15 -> VL 15
13317 * otherwise
13318 * -> VL 0
13319 */
13320static void init_sc2vl_tables(struct hfi1_devdata *dd)
13321{
13322 int i;
13323 /* init per architecture spec, constrained by hardware capability */
13324
13325 /* HFI maps sent packets */
13326 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13327 0,
13328 0, 0, 1, 1,
13329 2, 2, 3, 3,
13330 4, 4, 5, 5,
13331 6, 6, 7, 7));
13332 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13333 1,
13334 8, 0, 9, 0,
13335 10, 0, 11, 0,
13336 12, 0, 13, 0,
13337 14, 0, 15, 15));
13338 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13339 2,
13340 16, 0, 17, 0,
13341 18, 0, 19, 0,
13342 20, 0, 21, 0,
13343 22, 0, 23, 0));
13344 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13345 3,
13346 24, 0, 25, 0,
13347 26, 0, 27, 0,
13348 28, 0, 29, 0,
13349 30, 0, 31, 0));
13350
13351 /* DC maps received packets */
13352 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13353 15_0,
13354 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13355 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13356 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13357 31_16,
13358 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13359 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13360
13361 /* initialize the cached sc2vl values consistently with h/w */
13362 for (i = 0; i < 32; i++) {
13363 if (i < 8 || i == 15)
13364 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13365 else
13366 *((u8 *)(dd->sc2vl) + i) = 0;
13367 }
13368}
13369
13370/*
13371 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13372 * depend on the chip going through a power-on reset - a driver may be loaded
13373 * and unloaded many times.
13374 *
13375 * Do not write any CSR values to the chip in this routine - there may be
13376 * a reset following the (possible) FLR in this routine.
13377 *
13378 */
13379static void init_chip(struct hfi1_devdata *dd)
13380{
13381 int i;
13382
13383 /*
13384 * Put the HFI CSRs in a known state.
13385 * Combine this with a DC reset.
13386 *
13387 * Stop the device from doing anything while we do a
13388 * reset. We know there are no other active users of
13389 * the device since we are now in charge. Turn off
13390 * all outbound and inbound traffic and make sure
13391 * the device does not generate any interrupts.
13392 */
13393
13394 /* disable send contexts and SDMA engines */
13395 write_csr(dd, SEND_CTRL, 0);
13396 for (i = 0; i < dd->chip_send_contexts; i++)
13397 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13398 for (i = 0; i < dd->chip_sdma_engines; i++)
13399 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13400 /* disable port (turn off RXE inbound traffic) and contexts */
13401 write_csr(dd, RCV_CTRL, 0);
13402 for (i = 0; i < dd->chip_rcv_contexts; i++)
13403 write_csr(dd, RCV_CTXT_CTRL, 0);
13404 /* mask all interrupt sources */
13405 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13406 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
13407
13408 /*
13409 * DC Reset: do a full DC reset before the register clear.
13410 * A recommended length of time to hold is one CSR read,
13411 * so reread the CceDcCtrl. Then, hold the DC in reset
13412 * across the clear.
13413 */
13414 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13415 (void) read_csr(dd, CCE_DC_CTRL);
13416
13417 if (use_flr) {
13418 /*
13419 * A FLR will reset the SPC core and part of the PCIe.
13420 * The parts that need to be restored have already been
13421 * saved.
13422 */
13423 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13424
13425 /* do the FLR, the DC reset will remain */
13426 hfi1_pcie_flr(dd);
13427
13428 /* restore command and BARs */
13429 restore_pci_variables(dd);
13430
13431 if (is_ax(dd)) {
13432 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13433 hfi1_pcie_flr(dd);
13434 restore_pci_variables(dd);
13435 }
13436
13437 reset_asic_csrs(dd);
13438 } else {
13439 dd_dev_info(dd, "Resetting CSRs with writes\n");
13440 reset_cce_csrs(dd);
13441 reset_txe_csrs(dd);
13442 reset_rxe_csrs(dd);
13443 reset_asic_csrs(dd);
13444 reset_misc_csrs(dd);
13445 }
13446 /* clear the DC reset */
13447 write_csr(dd, CCE_DC_CTRL, 0);
13448
13449 /* Set the LED off */
13450 setextled(dd, 0);
13451
13452 /*
13453 * Clear the QSFP reset.
13454 * An FLR enforces a 0 on all out pins. The driver does not touch
13455 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
13456 * anything plugged in constantly in reset, if it pays attention
13457 * to RESET_N.
13458 * Prime examples of this are optical cables. Set all pins high.
13459 * I2CCLK and I2CDAT will change per direction, and INT_N and
13460 * MODPRS_N are input only and their value is ignored.
13461 */
13462 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13463 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13464}
13465
13466static void init_early_variables(struct hfi1_devdata *dd)
13467{
13468 int i;
13469
13470 /* assign link credit variables */
13471 dd->vau = CM_VAU;
13472 dd->link_credits = CM_GLOBAL_CREDITS;
13473 if (is_ax(dd))
13474 dd->link_credits--;
13475 dd->vcu = cu_to_vcu(hfi1_cu);
13476 /* enough room for 8 MAD packets plus header - 17K */
13477 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13478 if (dd->vl15_init > dd->link_credits)
13479 dd->vl15_init = dd->link_credits;
13480
13481 write_uninitialized_csrs_and_memories(dd);
13482
13483 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13484 for (i = 0; i < dd->num_pports; i++) {
13485 struct hfi1_pportdata *ppd = &dd->pport[i];
13486
13487 set_partition_keys(ppd);
13488 }
13489 init_sc2vl_tables(dd);
13490}
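
/*
 * Illustrative sketch (not part of the driver): how the VL15 initial credit
 * value set above works out.  Eight MAD packets of 2048 bytes plus a
 * 128-byte header each come to 17408 bytes (~17K); dividing by the
 * allocation unit gives the credit count.  This assumes vau_to_au()
 * expands to 8 << vau and that CM_VAU is 3 (a 64-byte AU), which matches
 * this generation of the chip code but should be checked against chip.h.
 */
static inline u32 example_vl15_credits(void)
{
	u32 bytes = 8 * (2048 + 128);	/* 17408 bytes of MAD traffic */
	u32 au = 8 << 3;		/* assumed AU size for CM_VAU == 3 */

	return bytes / au;		/* 17408 / 64 = 272 credits */
}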
13491
13492static void init_kdeth_qp(struct hfi1_devdata *dd)
13493{
13494 /* user changed the KDETH_QP */
13495 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13496 /* out of range or illegal value */
13497 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13498 kdeth_qp = 0;
13499 }
13500 if (kdeth_qp == 0) /* not set, or failed range check */
13501 kdeth_qp = DEFAULT_KDETH_QP;
13502
13503 write_csr(dd, SEND_BTH_QP,
13504 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13505 << SEND_BTH_QP_KDETH_QP_SHIFT);
13506
13507 write_csr(dd, RCV_BTH_QP,
13508 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13509 << RCV_BTH_QP_KDETH_QP_SHIFT);
13510}
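
/*
 * Illustrative sketch (not part of the driver): how a KDETH QP prefix is
 * placed into a BTH QP CSR by init_kdeth_qp() above.  The prefix selects
 * the upper bits of the 24-bit QPN space reserved for KDETH traffic.  The
 * mask and shift below are hypothetical stand-ins for
 * SEND_BTH_QP_KDETH_QP_MASK/_SHIFT; the real values live in the generated
 * chip register headers.
 */
static inline u64 example_kdeth_qp_csr(uint prefix)
{
	const u64 mask = 0xff;	/* hypothetical: 8-bit prefix field */
	const int shift = 16;	/* hypothetical: prefix occupies QPN bits 23:16 */

	return (prefix & mask) << shift;
}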
13511
13512/**
13513 * init_qpmap_table
13514 * @dd - device data
13515 * @first_ctxt - first context
13516 * @last_ctxt - last context
13517 *
13518 * This routine sets the qpn mapping table that
13519 * is indexed by qpn[8:1].
13520 *
13521 * The routine will round robin the 256 settings
13522 * from first_ctxt to last_ctxt.
13523 *
13524 * The first/last looks ahead to having specialized
13525 * receive contexts for mgmt and bypass. Normal
13526 * verbs traffic is assumed to be on a range
13527 * of receive contexts.
13528 */
13529static void init_qpmap_table(struct hfi1_devdata *dd,
13530 u32 first_ctxt,
13531 u32 last_ctxt)
13532{
13533 u64 reg = 0;
13534 u64 regno = RCV_QP_MAP_TABLE;
13535 int i;
13536 u64 ctxt = first_ctxt;
13537
13538 for (i = 0; i < 256;) {
13539 reg |= ctxt << (8 * (i % 8));
13540 i++;
13541 ctxt++;
13542 if (ctxt > last_ctxt)
13543 ctxt = first_ctxt;
13544 if (i % 8 == 0) {
13545 write_csr(dd, regno, reg);
13546 reg = 0;
13547 regno += 8;
13548 }
13549 }
13550 if (i % 8)
13551 write_csr(dd, regno, reg);
13552
13553 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13554 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13555}
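
/*
 * Illustrative sketch (not part of the driver): the packing done by
 * init_qpmap_table() above.  Each RcvQPMapTable CSR holds eight one-byte
 * entries, so the 256 QPN[8:1] slots fit in 32 registers, and receive
 * context numbers are assigned round-robin from first_ctxt to last_ctxt.
 * This helper only fills a local array to show the layout; it does not
 * touch any CSR.
 */
static void example_qpmap_pack(u64 *regs /* 32 entries */, u32 first_ctxt,
			       u32 last_ctxt)
{
	u64 reg = 0;
	u32 ctxt = first_ctxt;
	int i;

	for (i = 0; i < 256; i++) {
		reg |= (u64)ctxt << (8 * (i % 8));	/* byte i%8 of this register */
		if (++ctxt > last_ctxt)
			ctxt = first_ctxt;		/* wrap: round-robin */
		if (i % 8 == 7) {
			regs[i / 8] = reg;		/* register full, move on */
			reg = 0;
		}
	}
}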
13556
13557/**
13558 * init_qos - init RX qos
13559 * @dd - device data
13560 * @first_ctxt - first context
13561 *
13562 * This routine initializes Rule 0 and the
13563 * RSM map table to implement qos.
13564 *
13565 * If all of the limit tests succeed,
13566 * qos is applied based on the array
13567 * interpretation of krcvqs where
13568 * entry 0 is VL0.
13569 *
13570 * The number of vl bits (n) and the number of qpn
13571 * bits (m) are computed to feed both the RSM map table
13572 * and the single rule.
13573 *
13574 */
13575static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13576{
13577 u8 max_by_vl = 0;
13578 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13579 u64 *rsmmap;
13580 u64 reg;
13581 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13582
13583 /* validate */
13584 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13585 num_vls == 1 ||
13586 krcvqsset <= 1)
13587 goto bail;
13588 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13589 if (krcvqs[i] > max_by_vl)
13590 max_by_vl = krcvqs[i];
13591 if (max_by_vl > 32)
13592 goto bail;
13593 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13594 /* determine bits vl */
13595 n = ilog2(num_vls);
13596 /* determine bits for qpn */
13597 m = ilog2(qpns_per_vl);
13598 if ((m + n) > 7)
13599 goto bail;
13600 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13601 goto bail;
13602 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
13603 if (!rsmmap)
13604 goto bail;
13605 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13606 /* init the local copy of the table */
13607 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13608 unsigned tctxt;
13609
13610 for (qpn = 0, tctxt = ctxt;
13611 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13612 unsigned idx, regoff, regidx;
13613
13614 /* generate index <= 128 */
13615 idx = (qpn << n) ^ i;
13616 regoff = (idx % 8) * 8;
13617 regidx = idx / 8;
13618 reg = rsmmap[regidx];
13619 /* replace 0xff with context number */
13620 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13621 << regoff);
13622 reg |= (u64)(tctxt++) << regoff;
13623 rsmmap[regidx] = reg;
13624 if (tctxt == ctxt + krcvqs[i])
13625 tctxt = ctxt;
13626 }
13627 ctxt += krcvqs[i];
13628 }
13629 /* flush cached copies to chip */
13630 for (i = 0; i < NUM_MAP_REGS; i++)
13631 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13632 /* add rule0 */
13633 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13634 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13635 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13636 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13637 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13638 LRH_BTH_MATCH_OFFSET
13639 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13640 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13641 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13642 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13643 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13644 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13645 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13646 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13647 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13648 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13649 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13650 /* Enable RSM */
13651 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13652 kfree(rsmmap);
13653 /* map everything else to first context */
13654 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
13655 dd->qos_shift = n + 1;
13656 return;
13657bail:
13658 dd->qos_shift = 1;
13659 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13660}
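
/*
 * Illustrative sketch (not part of the driver): the bit budget used by
 * init_qos() above.  n is the number of VL bits and m the number of QPN
 * bits; n + m may not exceed 7, and the RSM map entry for a given
 * (vl, qpn) pair is looked up at index (qpn << n) ^ vl.  The num_vls and
 * qpns_per_vl values below are hypothetical module-parameter outcomes.
 */
static inline unsigned example_rsm_index(void)
{
	unsigned num_vls = 4;		/* hypothetical */
	unsigned n = ilog2(num_vls);	/* 2 VL bits; with 2 QPN bits, n + m = 4 <= 7 */
	unsigned vl = 1, qpn = 2;	/* hypothetical lookup */

	return (qpn << n) ^ vl;		/* index 9 in the RSM map table */
}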
13661
13662static void init_rxe(struct hfi1_devdata *dd)
13663{
13664 /* enable all receive errors */
13665 write_csr(dd, RCV_ERR_MASK, ~0ull);
13666 /* setup QPN map table - start where VL15 context leaves off */
13667 init_qos(
13668 dd,
13669 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13670 /*
13671 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13672 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13673 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13674 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13675 * Max_PayLoad_Size set to its minimum of 128.
13676 *
13677 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13678 * (64 bytes). Max_Payload_Size is possibly modified upward in
13679 * tune_pcie_caps() which is called after this routine.
13680 */
13681}
13682
13683static void init_other(struct hfi1_devdata *dd)
13684{
13685 /* enable all CCE errors */
13686 write_csr(dd, CCE_ERR_MASK, ~0ull);
13687 /* enable *some* Misc errors */
13688 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13689 /* enable all DC errors, except LCB */
13690 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13691 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13692}
13693
13694/*
13695 * Fill out the given AU table using the given CU. A CU is defined in terms
13696 * of AUs. The table is an encoding: given the index, how many AUs does that
13697 * represent?
13698 *
13699 * NOTE: Assumes that the register layout is the same for the
13700 * local and remote tables.
13701 */
13702static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13703 u32 csr0to3, u32 csr4to7)
13704{
13705 write_csr(dd, csr0to3,
13706 0ull <<
13707 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13708 | 1ull <<
13709 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13710 | 2ull * cu <<
13711 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13712 | 4ull * cu <<
13713 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13714 write_csr(dd, csr4to7,
13715 8ull * cu <<
13716 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13717 | 16ull * cu <<
13718 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13719 | 32ull * cu <<
13720 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13721 | 64ull * cu <<
13722 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13723
13724}
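
/*
 * Illustrative sketch (not part of the driver): the encoding written by
 * assign_cm_au_table() above.  Entry i of the AU table holds how many AUs
 * that index represents: 0, 1, then cu scaled by successive powers of two.
 * For a hypothetical cu of 4 the table reads 0, 1, 8, 16, 32, 64, 128, 256.
 */
static void example_au_table(u32 cu, u32 table[8])
{
	int i;

	table[0] = 0;
	table[1] = 1;
	for (i = 2; i < 8; i++)
		table[i] = cu << (i - 1);	/* 2*cu, 4*cu, ... 64*cu */
}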
13725
13726static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13727{
13728 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13729 SEND_CM_LOCAL_AU_TABLE4_TO7);
13730}
13731
13732void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13733{
13734 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13735 SEND_CM_REMOTE_AU_TABLE4_TO7);
13736}
13737
13738static void init_txe(struct hfi1_devdata *dd)
13739{
13740 int i;
13741
13742 /* enable all PIO, SDMA, general, and Egress errors */
13743 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13744 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13745 write_csr(dd, SEND_ERR_MASK, ~0ull);
13746 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13747
13748 /* enable all per-context and per-SDMA engine errors */
13749 for (i = 0; i < dd->chip_send_contexts; i++)
13750 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13751 for (i = 0; i < dd->chip_sdma_engines; i++)
13752 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13753
13754 /* set the local CU to AU mapping */
13755 assign_local_cm_au_table(dd, dd->vcu);
13756
13757 /*
13758 * Set reasonable default for Credit Return Timer
13759 * Don't set on Simulator - causes it to choke.
13760 */
13761 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13762 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13763}
13764
13765int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13766{
13767 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13768 unsigned sctxt;
13769 int ret = 0;
13770 u64 reg;
13771
13772 if (!rcd || !rcd->sc) {
13773 ret = -EINVAL;
13774 goto done;
13775 }
13776 sctxt = rcd->sc->hw_context;
13777 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13778 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13779 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13780 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13781 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13782 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13783 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13784 /*
13785 * Enable send-side J_KEY integrity check, unless this is A0 h/w
13786 */
13787 if (!is_ax(dd)) {
13788 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13789 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13790 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13791 }
13792
13793 /* Enable J_KEY check on receive context. */
13794 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13795 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13796 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13797 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13798done:
13799 return ret;
13800}
13801
13802int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13803{
13804 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13805 unsigned sctxt;
13806 int ret = 0;
13807 u64 reg;
13808
13809 if (!rcd || !rcd->sc) {
13810 ret = -EINVAL;
13811 goto done;
13812 }
13813 sctxt = rcd->sc->hw_context;
13814 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13815 /*
13816 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13817 * This check would not have been enabled for A0 h/w, see
13818 * set_ctxt_jkey().
13819 */
13820 if (!is_ax(dd)) {
13821 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13822 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13823 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13824 }
13825 /* Turn off the J_KEY on the receive side */
13826 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13827done:
13828 return ret;
13829}
13830
13831int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13832{
13833 struct hfi1_ctxtdata *rcd;
13834 unsigned sctxt;
13835 int ret = 0;
13836 u64 reg;
13837
13838 if (ctxt < dd->num_rcv_contexts)
13839 rcd = dd->rcd[ctxt];
13840 else {
13841 ret = -EINVAL;
13842 goto done;
13843 }
13844 if (!rcd || !rcd->sc) {
13845 ret = -EINVAL;
13846 goto done;
13847 }
13848 sctxt = rcd->sc->hw_context;
13849 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13850 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13851 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13852 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13853 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13854 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13855done:
13856 return ret;
13857}
13858
13859int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13860{
13861 struct hfi1_ctxtdata *rcd;
13862 unsigned sctxt;
13863 int ret = 0;
13864 u64 reg;
13865
13866 if (ctxt < dd->num_rcv_contexts)
13867 rcd = dd->rcd[ctxt];
13868 else {
13869 ret = -EINVAL;
13870 goto done;
13871 }
13872 if (!rcd || !rcd->sc) {
13873 ret = -EINVAL;
13874 goto done;
13875 }
13876 sctxt = rcd->sc->hw_context;
13877 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13878 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13879 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13880 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13881done:
13882 return ret;
13883}
13884
13885/*
13886 * Start doing the clean up of the chip. Our clean up happens in multiple
13887 * stages and this is just the first.
13888 */
13889void hfi1_start_cleanup(struct hfi1_devdata *dd)
13890{
13891 aspm_exit(dd);
13892 free_cntrs(dd);
13893 free_rcverr(dd);
13894 clean_up_interrupts(dd);
13895}
13896
13897#define HFI_BASE_GUID(dev) \
13898 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13899
13900/*
13901 * Certain chip functions need to be initialized only once per asic
13902 * instead of per-device. This function finds the peer device and
13903 * checks whether that chip initialization needs to be done by this
13904 * device.
13905 */
13906static void asic_should_init(struct hfi1_devdata *dd)
13907{
13908 unsigned long flags;
13909 struct hfi1_devdata *tmp, *peer = NULL;
13910
13911 spin_lock_irqsave(&hfi1_devs_lock, flags);
13912 /* Find our peer device */
13913 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13914 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13915 dd->unit != tmp->unit) {
13916 peer = tmp;
13917 break;
13918 }
13919 }
13920
13921 /*
13922 * "Claim" the ASIC for initialization if it hasn't been
13923 * "claimed" yet.
13924 */
13925 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13926 dd->flags |= HFI1_DO_INIT_ASIC;
13927 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13928}
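
/*
 * Illustrative sketch (not part of the driver): the comparison that
 * HFI_BASE_GUID() enables in asic_should_init() above.  The two HFIs on
 * one ASIC share a GUID except for the single bit at GUID_HFI_INDEX_SHIFT,
 * so clearing that bit makes the peers compare equal.  The shift is passed
 * in here rather than using GUID_HFI_INDEX_SHIFT directly.
 */
static inline int example_same_asic(u64 guid_a, u64 guid_b, int hfi_index_shift)
{
	u64 mask = ~(1ULL << hfi_index_shift);	/* ignore the per-HFI index bit */

	return (guid_a & mask) == (guid_b & mask);
}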
13929
13930/*
13931 * Set dd->boardname. Use a generic name if a name is not returned from
13932 * EFI variable space.
13933 *
13934 * Return 0 on success, -ENOMEM if space could not be allocated.
13935 */
13936static int obtain_boardname(struct hfi1_devdata *dd)
13937{
13938 /* generic board description */
13939 const char generic[] =
13940 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13941 unsigned long size;
13942 int ret;
13943
13944 ret = read_hfi1_efi_var(dd, "description", &size,
13945 (void **)&dd->boardname);
13946 if (ret) {
13947 dd_dev_info(dd, "Board description not found\n");
13948 /* use generic description */
13949 dd->boardname = kstrdup(generic, GFP_KERNEL);
13950 if (!dd->boardname)
13951 return -ENOMEM;
13952 }
13953 return 0;
13954}
13955
13956/**
13957 * Allocate and initialize the device structure for the hfi.
13958 * @pdev: the pci_dev for hfi1_ib device
13959 * @ent: pci_device_id struct for this dev
13960 *
13961 * Also allocates, initializes, and returns the devdata struct for this
13962 * device instance
13963 *
13964 * This is global, and is called directly at init to set up the
13965 * chip-specific function pointers for later use.
13966 */
13967struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13968 const struct pci_device_id *ent)
13969{
13970 struct hfi1_devdata *dd;
13971 struct hfi1_pportdata *ppd;
13972 u64 reg;
13973 int i, ret;
13974 static const char * const inames[] = { /* implementation names */
13975 "RTL silicon",
13976 "RTL VCS simulation",
13977 "RTL FPGA emulation",
13978 "Functional simulator"
13979 };
13980
13981 dd = hfi1_alloc_devdata(pdev,
13982 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
13983 if (IS_ERR(dd))
13984 goto bail;
13985 ppd = dd->pport;
13986 for (i = 0; i < dd->num_pports; i++, ppd++) {
13987 int vl;
13988 /* init common fields */
13989 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13990 /* DC supports 4 link widths */
13991 ppd->link_width_supported =
13992 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13993 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13994 ppd->link_width_downgrade_supported =
13995 ppd->link_width_supported;
13996 /* start out enabling only 4X */
13997 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13998 ppd->link_width_downgrade_enabled =
13999 ppd->link_width_downgrade_supported;
14000 /* link width active is 0 when link is down */
14001 /* link width downgrade active is 0 when link is down */
14002
14003 if (num_vls < HFI1_MIN_VLS_SUPPORTED
14004 || num_vls > HFI1_MAX_VLS_SUPPORTED) {
14005 hfi1_early_err(&pdev->dev,
14006 "Invalid num_vls %u, using %u VLs\n",
14007 num_vls, HFI1_MAX_VLS_SUPPORTED);
14008 num_vls = HFI1_MAX_VLS_SUPPORTED;
14009 }
14010 ppd->vls_supported = num_vls;
14011 ppd->vls_operational = ppd->vls_supported;
14012 ppd->actual_vls_operational = ppd->vls_supported;
14013 /* Set the default MTU. */
14014 for (vl = 0; vl < num_vls; vl++)
14015 dd->vld[vl].mtu = hfi1_max_mtu;
14016 dd->vld[15].mtu = MAX_MAD_PACKET;
14017 /*
14018 * Set the initial values to reasonable default, will be set
14019 * for real when link is up.
14020 */
14021 ppd->lstate = IB_PORT_DOWN;
14022 ppd->overrun_threshold = 0x4;
14023 ppd->phy_error_threshold = 0xf;
14024 ppd->port_crc_mode_enabled = link_crc_mask;
14025 /* initialize supported LTP CRC mode */
14026 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14027 /* initialize enabled LTP CRC mode */
14028 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14029 /* start in offline */
14030 ppd->host_link_state = HLS_DN_OFFLINE;
14031 init_vl_arb_caches(ppd);
14032 ppd->last_pstate = 0xff; /* invalid value */
14033 }
14034
14035 dd->link_default = HLS_DN_POLL;
14036
14037 /*
14038 * Do remaining PCIe setup and save PCIe values in dd.
14039 * Any error printing is already done by the init code.
14040 * On return, we have the chip mapped.
14041 */
14042 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14043 if (ret < 0)
14044 goto bail_free;
14045
14046 /* verify that reads actually work, save revision for reset check */
14047 dd->revision = read_csr(dd, CCE_REVISION);
14048 if (dd->revision == ~(u64)0) {
14049 dd_dev_err(dd, "cannot read chip CSRs\n");
14050 ret = -EINVAL;
14051 goto bail_cleanup;
14052 }
14053 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14054 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14055 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14056 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14057
14058 /* obtain the hardware ID - NOT related to unit, which is a
14059 software enumeration */
14060 reg = read_csr(dd, CCE_REVISION2);
14061 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14062 & CCE_REVISION2_HFI_ID_MASK;
14063 /* the variable size will remove unwanted bits */
14064 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14065 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14066 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14067 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
14068 (int)dd->irev);
14069
14070 /* speeds the hardware can support */
14071 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14072 /* speeds allowed to run at */
14073 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14074 /* give a reasonable active value, will be set on link up */
14075 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14076
14077 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14078 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14079 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14080 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14081 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14082 /* fix up link widths for emulation _p */
14083 ppd = dd->pport;
14084 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14085 ppd->link_width_supported =
14086 ppd->link_width_enabled =
14087 ppd->link_width_downgrade_supported =
14088 ppd->link_width_downgrade_enabled =
14089 OPA_LINK_WIDTH_1X;
14090 }
14091 /* ensure num_vls isn't larger than number of sdma engines */
14092 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14093 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14094 num_vls, dd->chip_sdma_engines);
14095 num_vls = dd->chip_sdma_engines;
14096 ppd->vls_supported = dd->chip_sdma_engines;
14097 ppd->vls_operational = ppd->vls_supported;
14098 }
14099
14100 /*
14101 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14102 * Limit the max if larger than the field holds. If timeout is
14103 * non-zero, then the calculated field will be at least 1.
14104 *
14105 * Must be after icode is set up - the cclock rate depends
14106 * on knowing the hardware being used.
14107 */
14108 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14109 if (dd->rcv_intr_timeout_csr >
14110 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14111 dd->rcv_intr_timeout_csr =
14112 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14113 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14114 dd->rcv_intr_timeout_csr = 1;
14115
14116 /* needs to be done before we look for the peer device */
14117 read_guid(dd);
14118
14119 /* should this device init the ASIC block? */
14120 asic_should_init(dd);
14121
14122 /* obtain chip sizes, reset chip CSRs */
14123 init_chip(dd);
14124
14125 /* read in the PCIe link speed information */
14126 ret = pcie_speeds(dd);
14127 if (ret)
14128 goto bail_cleanup;
14129
14130 /* Needs to be called before hfi1_firmware_init */
14131 get_platform_config(dd);
14132
14133 /* read in firmware */
14134 ret = hfi1_firmware_init(dd);
14135 if (ret)
14136 goto bail_cleanup;
14137
14138 /*
14139 * In general, the PCIe Gen3 transition must occur after the
14140 * chip has been idled (so it won't initiate any PCIe transactions
14141 * e.g. an interrupt) and before the driver changes any registers
14142 * (the transition will reset the registers).
14143 *
14144 * In particular, place this call after:
14145 * - init_chip() - the chip will not initiate any PCIe transactions
14146 * - pcie_speeds() - reads the current link speed
14147 * - hfi1_firmware_init() - the needed firmware is ready to be
14148 * downloaded
14149 */
14150 ret = do_pcie_gen3_transition(dd);
14151 if (ret)
14152 goto bail_cleanup;
14153
14154 /* start setting dd values and adjusting CSRs */
14155 init_early_variables(dd);
14156
14157 parse_platform_config(dd);
14158
14159 ret = obtain_boardname(dd);
14160 if (ret)
14161 goto bail_cleanup;
14162
14163 snprintf(dd->boardversion, BOARD_VERS_MAX,
14164 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14165 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14166 (u32)dd->majrev,
14167 (u32)dd->minrev,
14168 (dd->revision >> CCE_REVISION_SW_SHIFT)
14169 & CCE_REVISION_SW_MASK);
14170
14171 ret = set_up_context_variables(dd);
14172 if (ret)
14173 goto bail_cleanup;
14174
14175 /* set initial RXE CSRs */
14176 init_rxe(dd);
14177 /* set initial TXE CSRs */
14178 init_txe(dd);
14179 /* set initial non-RXE, non-TXE CSRs */
14180 init_other(dd);
14181 /* set up KDETH QP prefix in both RX and TX CSRs */
14182 init_kdeth_qp(dd);
14183
14184 ret = hfi1_dev_affinity_init(dd);
14185 if (ret)
14186 goto bail_cleanup;
14187
14188 /* send contexts must be set up before receive contexts */
14189 ret = init_send_contexts(dd);
14190 if (ret)
14191 goto bail_cleanup;
14192
14193 ret = hfi1_create_ctxts(dd);
14194 if (ret)
14195 goto bail_cleanup;
14196
14197 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14198 /*
14199 * rcd[0] is guaranteed to be valid by this point. Also, all
14200 * context are using the same value, as per the module parameter.
14201 */
14202 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14203
14204 ret = init_pervl_scs(dd);
14205 if (ret)
14206 goto bail_cleanup;
14207
14208 /* sdma init */
14209 for (i = 0; i < dd->num_pports; ++i) {
14210 ret = sdma_init(dd, i);
14211 if (ret)
14212 goto bail_cleanup;
14213 }
14214
14215 /* use contexts created by hfi1_create_ctxts */
14216 ret = set_up_interrupts(dd);
14217 if (ret)
14218 goto bail_cleanup;
14219
14220 /* set up LCB access - must be after set_up_interrupts() */
14221 init_lcb_access(dd);
14222
14223 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14224 dd->base_guid & 0xFFFFFF);
14225
14226 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14227 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14228 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14229
14230 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14231 if (ret)
14232 goto bail_clear_intr;
14233 check_fabric_firmware_versions(dd);
14234
14235 thermal_init(dd);
14236
14237 ret = init_cntrs(dd);
14238 if (ret)
14239 goto bail_clear_intr;
14240
14241 ret = init_rcverr(dd);
14242 if (ret)
14243 goto bail_free_cntrs;
14244
14245 ret = eprom_init(dd);
14246 if (ret)
14247 goto bail_free_rcverr;
14248
14249 goto bail;
14250
14251bail_free_rcverr:
14252 free_rcverr(dd);
14253bail_free_cntrs:
14254 free_cntrs(dd);
14255bail_clear_intr:
14256 clean_up_interrupts(dd);
14257bail_cleanup:
14258 hfi1_pcie_ddcleanup(dd);
14259bail_free:
14260 hfi1_free_devdata(dd);
14261 dd = ERR_PTR(ret);
14262bail:
14263 return dd;
14264}
14265
14266static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14267 u32 dw_len)
14268{
14269 u32 delta_cycles;
14270 u32 current_egress_rate = ppd->current_egress_rate;
14271 /* rates here are in units of 10^6 bits/sec */
14272
14273 if (desired_egress_rate == -1)
14274 return 0; /* shouldn't happen */
14275
14276 if (desired_egress_rate >= current_egress_rate)
14277 return 0; /* we can't help go faster, only slower */
14278
14279 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14280 egress_cycles(dw_len * 4, current_egress_rate);
14281
14282 return (u16)delta_cycles;
14283}
14284
14285
14286/**
14287 * create_pbc - build a pbc for transmission
14288 * @flags: special case flags or-ed in built pbc
14289 * @srate: static rate
14290 * @vl: vl
14291 * @dwlen: dword length (header words + data words + pbc words)
14292 *
14293 * Create a PBC with the given flags, rate, VL, and length.
14294 *
14295 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14296 * for verbs, which does not use this PSM feature. The lone other caller
14297 * is for the diagnostic interface which calls this if the user does not
14298 * supply their own PBC.
14299 */
14300u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14301 u32 dw_len)
14302{
14303 u64 pbc, delay = 0;
14304
14305 if (unlikely(srate_mbs))
14306 delay = delay_cycles(ppd, srate_mbs, dw_len);
14307
14308 pbc = flags
14309 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14310 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14311 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14312 | (dw_len & PBC_LENGTH_DWS_MASK)
14313 << PBC_LENGTH_DWS_SHIFT;
14314
14315 return pbc;
14316}
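
/*
 * Illustrative sketch (not part of the driver): a typical call to
 * create_pbc() above.  The dword length covers the PBC itself (2 dwords),
 * the packet header, and the payload.  The header and payload sizes below
 * are hypothetical; verbs computes them per packet, and srate is 0 when no
 * static-rate pacing is wanted.
 */
static u64 example_build_pbc(struct hfi1_pportdata *ppd, u32 vl)
{
	u32 hdr_dwords = 7;			/* hypothetical header size */
	u32 payload_dwords = 256 / 4;		/* hypothetical 256-byte payload */
	u32 dwlen = 2 + hdr_dwords + payload_dwords;

	/* no special flags, no static rate limiting */
	return create_pbc(ppd, 0, 0, vl, dwlen);
}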
14317
14318#define SBUS_THERMAL 0x4f
14319#define SBUS_THERM_MONITOR_MODE 0x1
14320
14321#define THERM_FAILURE(dev, ret, reason) \
14322 dd_dev_err((dd), \
14323 "Thermal sensor initialization failed: %s (%d)\n", \
14324 (reason), (ret))
14325
14326/*
14327 * Initialize the Avago Thermal sensor.
14328 *
14329 * After initialization, enable polling of thermal sensor through
14330 * SBus interface. In order for this to work, the SBus Master
14331 * firmware has to be loaded because the HW polling
14332 * logic uses SBus interrupts, which are not supported with
14333 * default firmware. Otherwise, no data will be returned through
14334 * the ASIC_STS_THERM CSR.
14335 */
14336static int thermal_init(struct hfi1_devdata *dd)
14337{
14338 int ret = 0;
14339
14340 if (dd->icode != ICODE_RTL_SILICON ||
14341 !(dd->flags & HFI1_DO_INIT_ASIC))
14342 return ret;
14343
14344 acquire_hw_mutex(dd);
14345 dd_dev_info(dd, "Initializing thermal sensor\n");
14346 /* Disable polling of thermal readings */
14347 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14348 msleep(100);
14349 /* Thermal Sensor Initialization */
14350 /* Step 1: Reset the Thermal SBus Receiver */
14351 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14352 RESET_SBUS_RECEIVER, 0);
14353 if (ret) {
14354 THERM_FAILURE(dd, ret, "Bus Reset");
14355 goto done;
14356 }
14357 /* Step 2: Set Reset bit in Thermal block */
14358 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14359 WRITE_SBUS_RECEIVER, 0x1);
14360 if (ret) {
14361 THERM_FAILURE(dd, ret, "Therm Block Reset");
14362 goto done;
14363 }
14364 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14365 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14366 WRITE_SBUS_RECEIVER, 0x32);
14367 if (ret) {
14368 THERM_FAILURE(dd, ret, "Write Clock Div");
14369 goto done;
14370 }
14371 /* Step 4: Select temperature mode */
14372 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14373 WRITE_SBUS_RECEIVER,
14374 SBUS_THERM_MONITOR_MODE);
14375 if (ret) {
14376 THERM_FAILURE(dd, ret, "Write Mode Sel");
14377 goto done;
14378 }
14379 /* Step 5: De-assert block reset and start conversion */
14380 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14381 WRITE_SBUS_RECEIVER, 0x2);
14382 if (ret) {
14383 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14384 goto done;
14385 }
14386 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14387 msleep(22);
14388
14389 /* Enable polling of thermal readings */
14390 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14391done:
14392 release_hw_mutex(dd);
14393 return ret;
14394}
14395
14396static void handle_temp_err(struct hfi1_devdata *dd)
14397{
14398 struct hfi1_pportdata *ppd = &dd->pport[0];
14399 /*
14400 * Thermal Critical Interrupt
14401 * Put the device into forced freeze mode, take link down to
14402 * offline, and put DC into reset.
14403 */
14404 dd_dev_emerg(dd,
14405 "Critical temperature reached! Forcing device into freeze mode!\n");
14406 dd->flags |= HFI1_FORCED_FREEZE;
14407 start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT);
14408 /*
14409 * Shut DC down as much and as quickly as possible.
14410 *
14411 * Step 1: Take the link down to OFFLINE. This will cause the
14412 * 8051 to put the Serdes in reset. However, we don't want to
14413 * go through the entire link state machine since we want to
14414 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14415 * but rather an attempt to save the chip.
14416 * Code below is almost the same as quiet_serdes() but avoids
14417 * all the extra work and the sleeps.
14418 */
14419 ppd->driver_link_ready = 0;
14420 ppd->link_enabled = 0;
14421 set_physical_link_state(dd, PLS_OFFLINE |
14422 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14423 /*
14424 * Step 2: Shutdown LCB and 8051
14425 * After shutdown, do not restore DC_CFG_RESET value.
14426 */
14427 dc_shutdown(dd);
14428}