blob: 0a774656cb74bb6e8df9c5754e59ec39f1bce556 [file] [log] [blame]
Mike Marciniszyn77241052015-07-30 15:17:43 -04001/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51/*
52 * This file contains all of the code that is specific to the HFI chip
53 */
54
55#include <linux/pci.h>
56#include <linux/delay.h>
57#include <linux/interrupt.h>
58#include <linux/module.h>
59
60#include "hfi.h"
61#include "trace.h"
62#include "mad.h"
63#include "pio.h"
64#include "sdma.h"
65#include "eprom.h"
Dean Luick5d9157a2015-11-16 21:59:34 -050066#include "efivar.h"
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080067#include "platform.h"
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080068#include "aspm.h"
Mike Marciniszyn77241052015-07-30 15:17:43 -040069
70#define NUM_IB_PORTS 1
71
72uint kdeth_qp;
73module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
74MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
75
76uint num_vls = HFI1_MAX_VLS_SUPPORTED;
77module_param(num_vls, uint, S_IRUGO);
78MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
79
80/*
81 * Default time to aggregate two 10K packets from the idle state
82 * (timer not running). The timer starts at the end of the first packet,
83 * so only the time for one 10K packet and header plus a bit extra is needed.
84 * 10 * 1024 + 64 header byte = 10304 byte
85 * 10304 byte / 12.5 GB/s = 824.32ns
86 */
87uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
88module_param(rcv_intr_timeout, uint, S_IRUGO);
89MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
90
91uint rcv_intr_count = 16; /* same as qib */
92module_param(rcv_intr_count, uint, S_IRUGO);
93MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
94
95ushort link_crc_mask = SUPPORTED_CRCS;
96module_param(link_crc_mask, ushort, S_IRUGO);
97MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
98
99uint loopback;
100module_param_named(loopback, loopback, uint, S_IRUGO);
101MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable");
102
103/* Other driver tunables */
104uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
105static ushort crc_14b_sideband = 1;
106static uint use_flr = 1;
107uint quick_linkup; /* skip LNI */
108
109struct flag_table {
110 u64 flag; /* the flag */
111 char *str; /* description string */
112 u16 extra; /* extra information */
113 u16 unused0;
114 u32 unused1;
115};
116
117/* str must be a string constant */
118#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
119#define FLAG_ENTRY0(str, flag) {flag, str, 0}
120
121/* Send Error Consequences */
122#define SEC_WRITE_DROPPED 0x1
123#define SEC_PACKET_DROPPED 0x2
124#define SEC_SC_HALTED 0x4 /* per-context only */
125#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
126
Mike Marciniszyn77241052015-07-30 15:17:43 -0400127#define MIN_KERNEL_KCTXTS 2
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -0500128#define FIRST_KERNEL_KCTXT 1
Mike Marciniszyn77241052015-07-30 15:17:43 -0400129#define NUM_MAP_REGS 32
130
131/* Bit offset into the GUID which carries HFI id information */
132#define GUID_HFI_INDEX_SHIFT 39
133
134/* extract the emulation revision */
135#define emulator_rev(dd) ((dd)->irev >> 8)
136/* parallel and serial emulation versions are 3 and 4 respectively */
137#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
138#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
139
140/* RSM fields */
141
142/* packet type */
143#define IB_PACKET_TYPE 2ull
144#define QW_SHIFT 6ull
145/* QPN[7..1] */
146#define QPN_WIDTH 7ull
147
148/* LRH.BTH: QW 0, OFFSET 48 - for match */
149#define LRH_BTH_QW 0ull
150#define LRH_BTH_BIT_OFFSET 48ull
151#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
152#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
153#define LRH_BTH_SELECT
154#define LRH_BTH_MASK 3ull
155#define LRH_BTH_VALUE 2ull
156
157/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
158#define LRH_SC_QW 0ull
159#define LRH_SC_BIT_OFFSET 56ull
160#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
161#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
162#define LRH_SC_MASK 128ull
163#define LRH_SC_VALUE 0ull
164
165/* SC[n..0] QW 0, OFFSET 60 - for select */
166#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
167
168/* QPN[m+n:1] QW 1, OFFSET 1 */
169#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
170
171/* defines to build power on SC2VL table */
172#define SC2VL_VAL( \
173 num, \
174 sc0, sc0val, \
175 sc1, sc1val, \
176 sc2, sc2val, \
177 sc3, sc3val, \
178 sc4, sc4val, \
179 sc5, sc5val, \
180 sc6, sc6val, \
181 sc7, sc7val) \
182( \
183 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
184 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
185 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
186 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
187 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
188 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
189 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
190 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
191)
192
193#define DC_SC_VL_VAL( \
194 range, \
195 e0, e0val, \
196 e1, e1val, \
197 e2, e2val, \
198 e3, e3val, \
199 e4, e4val, \
200 e5, e5val, \
201 e6, e6val, \
202 e7, e7val, \
203 e8, e8val, \
204 e9, e9val, \
205 e10, e10val, \
206 e11, e11val, \
207 e12, e12val, \
208 e13, e13val, \
209 e14, e14val, \
210 e15, e15val) \
211( \
212 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
213 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
214 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
215 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
216 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
217 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
218 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
219 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
220 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
221 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
222 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
223 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
224 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
225 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
226 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
227 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
228)
229
230/* all CceStatus sub-block freeze bits */
231#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
232 | CCE_STATUS_RXE_FROZE_SMASK \
233 | CCE_STATUS_TXE_FROZE_SMASK \
234 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
235/* all CceStatus sub-block TXE pause bits */
236#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
237 | CCE_STATUS_TXE_PAUSED_SMASK \
238 | CCE_STATUS_SDMA_PAUSED_SMASK)
239/* all CceStatus sub-block RXE pause bits */
240#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
241
242/*
243 * CCE Error flags.
244 */
245static struct flag_table cce_err_status_flags[] = {
246/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
247 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
248/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
249 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
250/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
251 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
252/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
253 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
254/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
255 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
256/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
257 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
258/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
259 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
260/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
261 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
262/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
263 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
264/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
265 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
266/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
267 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
268/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
269 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
270/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
271 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
272/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
273 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
274/*14*/ FLAG_ENTRY0("PcicRetryMemCorErr",
275 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
276/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
277 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
278/*16*/ FLAG_ENTRY0("PcicPostHdQCorErr",
279 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
280/*17*/ FLAG_ENTRY0("PcicPostHdQCorErr",
281 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
282/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
283 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
284/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
285 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
286/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
287 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
288/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
289 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
290/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
291 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
292/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
293 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
294/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
295 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
296/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
297 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
298/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
299 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
300/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
301 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
302/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
303 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
304/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
305 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
306/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
307 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
308/*31*/ FLAG_ENTRY0("LATriggered",
309 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
310/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
311 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
312/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
313 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
314/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
315 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
316/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
317 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
318/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
319 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
320/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
321 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
322/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
323 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
324/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
325 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
326/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
327 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
328/*41-63 reserved*/
329};
330
331/*
332 * Misc Error flags
333 */
334#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
335static struct flag_table misc_err_status_flags[] = {
336/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
337/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
338/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
339/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
340/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
341/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
342/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
343/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
344/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
345/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
346/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
347/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
348/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
349};
350
351/*
352 * TXE PIO Error flags and consequences
353 */
354static struct flag_table pio_err_status_flags[] = {
355/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
356 SEC_WRITE_DROPPED,
357 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
358/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
359 SEC_SPC_FREEZE,
360 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
361/* 2*/ FLAG_ENTRY("PioCsrParity",
362 SEC_SPC_FREEZE,
363 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
364/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
365 SEC_SPC_FREEZE,
366 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
367/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
368 SEC_SPC_FREEZE,
369 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
370/* 5*/ FLAG_ENTRY("PioPccFifoParity",
371 SEC_SPC_FREEZE,
372 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
373/* 6*/ FLAG_ENTRY("PioPecFifoParity",
374 SEC_SPC_FREEZE,
375 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
376/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
377 SEC_SPC_FREEZE,
378 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
379/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
380 SEC_SPC_FREEZE,
381 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
382/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
383 SEC_SPC_FREEZE,
384 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
385/*10*/ FLAG_ENTRY("PioSmPktResetParity",
386 SEC_SPC_FREEZE,
387 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
388/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
389 SEC_SPC_FREEZE,
390 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
391/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
392 SEC_SPC_FREEZE,
393 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
394/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
395 0,
396 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
397/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
398 0,
399 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
400/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
401 SEC_SPC_FREEZE,
402 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
403/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
404 SEC_SPC_FREEZE,
405 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
406/*17*/ FLAG_ENTRY("PioInitSmIn",
407 0,
408 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
409/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
410 SEC_SPC_FREEZE,
411 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
412/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
413 SEC_SPC_FREEZE,
414 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
415/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
416 0,
417 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
418/*21*/ FLAG_ENTRY("PioWriteDataParity",
419 SEC_SPC_FREEZE,
420 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
421/*22*/ FLAG_ENTRY("PioStateMachine",
422 SEC_SPC_FREEZE,
423 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
424/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
Jubin John8638b772016-02-14 20:19:24 -0800425 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
Mike Marciniszyn77241052015-07-30 15:17:43 -0400426 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
427/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
Jubin John8638b772016-02-14 20:19:24 -0800428 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
Mike Marciniszyn77241052015-07-30 15:17:43 -0400429 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
430/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
431 SEC_SPC_FREEZE,
432 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
433/*26*/ FLAG_ENTRY("PioVlfSopParity",
434 SEC_SPC_FREEZE,
435 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
436/*27*/ FLAG_ENTRY("PioVlFifoParity",
437 SEC_SPC_FREEZE,
438 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
439/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
440 SEC_SPC_FREEZE,
441 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
442/*29*/ FLAG_ENTRY("PioPpmcSopLen",
443 SEC_SPC_FREEZE,
444 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
445/*30-31 reserved*/
446/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
447 SEC_SPC_FREEZE,
448 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
449/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
450 SEC_SPC_FREEZE,
451 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
452/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
453 SEC_SPC_FREEZE,
454 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
455/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
456 SEC_SPC_FREEZE,
457 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
458/*36-63 reserved*/
459};
460
461/* TXE PIO errors that cause an SPC freeze */
462#define ALL_PIO_FREEZE_ERR \
463 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
491 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
492
493/*
494 * TXE SDMA Error flags
495 */
496static struct flag_table sdma_err_status_flags[] = {
497/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
498 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
499/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
500 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
501/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
502 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
503/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
504 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
505/*04-63 reserved*/
506};
507
508/* TXE SDMA errors that cause an SPC freeze */
509#define ALL_SDMA_FREEZE_ERR \
510 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
511 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
512 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
513
Mike Marciniszyn69a00b82016-02-03 14:31:49 -0800514/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
515#define PORT_DISCARD_EGRESS_ERRS \
516 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
517 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
518 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
519
Mike Marciniszyn77241052015-07-30 15:17:43 -0400520/*
521 * TXE Egress Error flags
522 */
523#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
524static struct flag_table egress_err_status_flags[] = {
525/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
526/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
527/* 2 reserved */
528/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
529 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
530/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
531/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
532/* 6 reserved */
533/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
534 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
535/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
536 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
537/* 9-10 reserved */
538/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
539 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
540/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
541/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
542/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
543/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
544/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
545 SEES(TX_SDMA0_DISALLOWED_PACKET)),
546/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
547 SEES(TX_SDMA1_DISALLOWED_PACKET)),
548/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
549 SEES(TX_SDMA2_DISALLOWED_PACKET)),
550/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
551 SEES(TX_SDMA3_DISALLOWED_PACKET)),
552/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
553 SEES(TX_SDMA4_DISALLOWED_PACKET)),
554/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
555 SEES(TX_SDMA5_DISALLOWED_PACKET)),
556/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
557 SEES(TX_SDMA6_DISALLOWED_PACKET)),
558/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
559 SEES(TX_SDMA7_DISALLOWED_PACKET)),
560/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
561 SEES(TX_SDMA8_DISALLOWED_PACKET)),
562/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
563 SEES(TX_SDMA9_DISALLOWED_PACKET)),
564/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
565 SEES(TX_SDMA10_DISALLOWED_PACKET)),
566/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
567 SEES(TX_SDMA11_DISALLOWED_PACKET)),
568/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
569 SEES(TX_SDMA12_DISALLOWED_PACKET)),
570/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
571 SEES(TX_SDMA13_DISALLOWED_PACKET)),
572/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
573 SEES(TX_SDMA14_DISALLOWED_PACKET)),
574/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
575 SEES(TX_SDMA15_DISALLOWED_PACKET)),
576/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
577 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
578/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
579 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
580/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
581 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
582/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
583 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
584/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
585 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
586/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
587 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
588/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
589 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
590/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
591 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
592/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
593 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
594/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
595/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
596/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
597/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
598/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
599/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
600/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
601/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
602/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
603/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
604/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
605/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
606/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
607/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
608/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
609/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
610/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
611/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
612/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
613/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
614/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
615/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
616 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
617/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
618 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
619};
620
621/*
622 * TXE Egress Error Info flags
623 */
624#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
625static struct flag_table egress_err_info_flags[] = {
626/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
627/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
628/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
629/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
630/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
631/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
632/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
633/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
634/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
635/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
636/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
637/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
638/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
639/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
640/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
641/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
642/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
643/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
644/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
645/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
646/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
647/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
648};
649
650/* TXE Egress errors that cause an SPC freeze */
651#define ALL_TXE_EGRESS_FREEZE_ERR \
652 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
653 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
654 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
655 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
656 | SEES(TX_LAUNCH_CSR_PARITY) \
657 | SEES(TX_SBRD_CTL_CSR_PARITY) \
658 | SEES(TX_CONFIG_PARITY) \
659 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
660 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
661 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
662 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
663 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
664 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
665 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
666 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
667 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
668 | SEES(TX_CREDIT_RETURN_PARITY))
669
670/*
671 * TXE Send error flags
672 */
673#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
674static struct flag_table send_err_status_flags[] = {
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -0500675/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
Mike Marciniszyn77241052015-07-30 15:17:43 -0400676/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
677/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
678};
679
680/*
681 * TXE Send Context Error flags and consequences
682 */
683static struct flag_table sc_err_status_flags[] = {
684/* 0*/ FLAG_ENTRY("InconsistentSop",
685 SEC_PACKET_DROPPED | SEC_SC_HALTED,
686 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
687/* 1*/ FLAG_ENTRY("DisallowedPacket",
688 SEC_PACKET_DROPPED | SEC_SC_HALTED,
689 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
690/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
691 SEC_WRITE_DROPPED | SEC_SC_HALTED,
692 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
693/* 3*/ FLAG_ENTRY("WriteOverflow",
694 SEC_WRITE_DROPPED | SEC_SC_HALTED,
695 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
696/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
697 SEC_WRITE_DROPPED | SEC_SC_HALTED,
698 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
699/* 5-63 reserved*/
700};
701
702/*
703 * RXE Receive Error flags
704 */
705#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
706static struct flag_table rxe_err_status_flags[] = {
707/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
708/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
709/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
710/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
711/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
712/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
713/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
714/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
715/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
716/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
717/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
718/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
719/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
720/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
721/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
722/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
723/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
724 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
725/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
726/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
727/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
728 RXES(RBUF_BLOCK_LIST_READ_UNC)),
729/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
730 RXES(RBUF_BLOCK_LIST_READ_COR)),
731/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
732 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
733/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
734 RXES(RBUF_CSR_QENT_CNT_PARITY)),
735/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
736 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
737/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
738 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
739/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
740/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
741/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
742 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
743/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
744/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
745/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
746/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
747/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
748/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
749/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
750/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
751 RXES(RBUF_FL_INITDONE_PARITY)),
752/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
753 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
754/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
755/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
756/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
757/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
758 RXES(LOOKUP_DES_PART1_UNC_COR)),
759/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
760 RXES(LOOKUP_DES_PART2_PARITY)),
761/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
762/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
763/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
764/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
765/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
766/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
767/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
768/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
769/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
770/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
771/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
772/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
773/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
774/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
775/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
776/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
777/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
778/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
779/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
780/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
781/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
782/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
783};
784
785/* RXE errors that will trigger an SPC freeze */
786#define ALL_RXE_FREEZE_ERR \
787 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
830 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
831
832#define RXE_FREEZE_ABORT_MASK \
833 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
834 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
835 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
836
837/*
838 * DCC Error Flags
839 */
840#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
841static struct flag_table dcc_err_flags[] = {
842 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
843 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
844 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
845 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
846 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
847 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
848 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
849 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
850 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
851 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
852 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
853 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
854 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
855 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
856 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
857 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
858 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
859 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
860 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
861 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
862 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
863 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
864 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
865 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
866 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
867 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
868 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
869 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
870 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
871 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
872 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
873 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
874 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
875 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
876 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
877 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
878 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
879 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
880 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
881 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
882 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
883 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
884 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
885 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
886 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
887 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
888};
889
890/*
891 * LCB error flags
892 */
893#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
894static struct flag_table lcb_err_flags[] = {
895/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
896/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
897/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
898/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
899 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
900/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
901/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
902/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
903/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
904/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
905/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
906/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
907/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
908/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
909/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
910 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
911/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
912/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
913/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
914/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
915/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
916/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
917 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
918/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
919/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
920/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
921/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
922/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
923/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
924/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
925 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
926/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
927/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
928 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
929/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
930 LCBE(REDUNDANT_FLIT_PARITY_ERR))
931};
932
933/*
934 * DC8051 Error Flags
935 */
936#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
937static struct flag_table dc8051_err_flags[] = {
938 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
939 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
940 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
941 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
942 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
943 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
944 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
945 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
946 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
Jubin John17fb4f22016-02-14 20:21:52 -0800947 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
Mike Marciniszyn77241052015-07-30 15:17:43 -0400948 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
949};
950
951/*
952 * DC8051 Information Error flags
953 *
954 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
955 */
956static struct flag_table dc8051_info_err_flags[] = {
957 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
958 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
959 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
960 FLAG_ENTRY0("Serdes internal loopback failure",
Jubin John17fb4f22016-02-14 20:21:52 -0800961 FAILED_SERDES_INTERNAL_LOOPBACK),
Mike Marciniszyn77241052015-07-30 15:17:43 -0400962 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
963 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
964 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
965 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
966 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
967 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
968 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
969 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
970};
971
972/*
973 * DC8051 Information Host Information flags
974 *
975 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
976 */
977static struct flag_table dc8051_info_host_msg_flags[] = {
978 FLAG_ENTRY0("Host request done", 0x0001),
979 FLAG_ENTRY0("BC SMA message", 0x0002),
980 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
981 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
982 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
983 FLAG_ENTRY0("External device config request", 0x0020),
984 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
985 FLAG_ENTRY0("LinkUp achieved", 0x0080),
986 FLAG_ENTRY0("Link going down", 0x0100),
987};
988
Mike Marciniszyn77241052015-07-30 15:17:43 -0400989static u32 encoded_size(u32 size);
990static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
991static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
992static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
993 u8 *continuous);
994static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
995 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
996static void read_vc_remote_link_width(struct hfi1_devdata *dd,
997 u8 *remote_tx_rate, u16 *link_widths);
998static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
999 u8 *flag_bits, u16 *link_widths);
1000static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1001 u8 *device_rev);
1002static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1003static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1004static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1005 u8 *tx_polarity_inversion,
1006 u8 *rx_polarity_inversion, u8 *max_rate);
1007static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1008 unsigned int context, u64 err_status);
1009static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1010static void handle_dcc_err(struct hfi1_devdata *dd,
1011 unsigned int context, u64 err_status);
1012static void handle_lcb_err(struct hfi1_devdata *dd,
1013 unsigned int context, u64 err_status);
1014static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1021static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022static void set_partition_keys(struct hfi1_pportdata *);
1023static const char *link_state_name(u32 state);
1024static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1025 u32 state);
1026static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1027 u64 *out_data);
1028static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1029static int thermal_init(struct hfi1_devdata *dd);
1030
1031static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1032 int msecs);
1033static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1034static void handle_temp_err(struct hfi1_devdata *);
1035static void dc_shutdown(struct hfi1_devdata *);
1036static void dc_start(struct hfi1_devdata *);
1037
1038/*
1039 * Error interrupt table entry. This is used as input to the interrupt
1040 * "clear down" routine used for all second tier error interrupt register.
1041 * Second tier interrupt registers have a single bit representing them
1042 * in the top-level CceIntStatus.
1043 */
1044struct err_reg_info {
1045 u32 status; /* status CSR offset */
1046 u32 clear; /* clear CSR offset */
1047 u32 mask; /* mask CSR offset */
1048 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1049 const char *desc;
1050};
1051
1052#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1053#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1054#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1055
1056/*
1057 * Helpers for building HFI and DC error interrupt table entries. Different
1058 * helpers are needed because of inconsistent register names.
1059 */
1060#define EE(reg, handler, desc) \
1061 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1062 handler, desc }
1063#define DC_EE1(reg, handler, desc) \
1064 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1065#define DC_EE2(reg, handler, desc) \
1066 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1067
1068/*
1069 * Table of the "misc" grouping of error interrupts. Each entry refers to
1070 * another register containing more information.
1071 */
1072static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1073/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1074/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1075/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1076/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1077/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1078/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1079/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1080/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1081 /* the rest are reserved */
1082};
1083
1084/*
1085 * Index into the Various section of the interrupt sources
1086 * corresponding to the Critical Temperature interrupt.
1087 */
1088#define TCRIT_INT_SOURCE 4
1089
1090/*
1091 * SDMA error interrupt entry - refers to another register containing more
1092 * information.
1093 */
1094static const struct err_reg_info sdma_eng_err =
1095 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1096
1097static const struct err_reg_info various_err[NUM_VARIOUS] = {
1098/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1099/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1100/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1101/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1102/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1103 /* rest are reserved */
1104};
1105
1106/*
1107 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1108 * register can not be derived from the MTU value because 10K is not
1109 * a power of 2. Therefore, we need a constant. Everything else can
1110 * be calculated.
1111 */
1112#define DCC_CFG_PORT_MTU_CAP_10240 7
1113
1114/*
1115 * Table of the DC grouping of error interrupts. Each entry refers to
1116 * another register containing more information.
1117 */
1118static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1119/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1120/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1121/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1122/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1123 /* the rest are reserved */
1124};
1125
1126struct cntr_entry {
1127 /*
1128 * counter name
1129 */
1130 char *name;
1131
1132 /*
1133 * csr to read for name (if applicable)
1134 */
1135 u64 csr;
1136
1137 /*
1138 * offset into dd or ppd to store the counter's value
1139 */
1140 int offset;
1141
1142 /*
1143 * flags
1144 */
1145 u8 flags;
1146
1147 /*
1148 * accessor for stat element, context either dd or ppd
1149 */
Jubin John17fb4f22016-02-14 20:21:52 -08001150 u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1151 int mode, u64 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001152};
1153
1154#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1155#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1156
1157#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1158{ \
1159 name, \
1160 csr, \
1161 offset, \
1162 flags, \
1163 accessor \
1164}
1165
1166/* 32bit RXE */
1167#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1168CNTR_ELEM(#name, \
1169 (counter * 8 + RCV_COUNTER_ARRAY32), \
1170 0, flags | CNTR_32BIT, \
1171 port_access_u32_csr)
1172
1173#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1174CNTR_ELEM(#name, \
1175 (counter * 8 + RCV_COUNTER_ARRAY32), \
1176 0, flags | CNTR_32BIT, \
1177 dev_access_u32_csr)
1178
1179/* 64bit RXE */
1180#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1181CNTR_ELEM(#name, \
1182 (counter * 8 + RCV_COUNTER_ARRAY64), \
1183 0, flags, \
1184 port_access_u64_csr)
1185
1186#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1187CNTR_ELEM(#name, \
1188 (counter * 8 + RCV_COUNTER_ARRAY64), \
1189 0, flags, \
1190 dev_access_u64_csr)
1191
1192#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1193#define OVR_ELM(ctx) \
1194CNTR_ELEM("RcvHdrOvr" #ctx, \
Jubin John8638b772016-02-14 20:19:24 -08001195 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
Mike Marciniszyn77241052015-07-30 15:17:43 -04001196 0, CNTR_NORMAL, port_access_u64_csr)
1197
1198/* 32bit TXE */
1199#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1200CNTR_ELEM(#name, \
1201 (counter * 8 + SEND_COUNTER_ARRAY32), \
1202 0, flags | CNTR_32BIT, \
1203 port_access_u32_csr)
1204
1205/* 64bit TXE */
1206#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1207CNTR_ELEM(#name, \
1208 (counter * 8 + SEND_COUNTER_ARRAY64), \
1209 0, flags, \
1210 port_access_u64_csr)
1211
1212# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1213CNTR_ELEM(#name,\
1214 counter * 8 + SEND_COUNTER_ARRAY64, \
1215 0, \
1216 flags, \
1217 dev_access_u64_csr)
1218
1219/* CCE */
1220#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1221CNTR_ELEM(#name, \
1222 (counter * 8 + CCE_COUNTER_ARRAY32), \
1223 0, flags | CNTR_32BIT, \
1224 dev_access_u32_csr)
1225
1226#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1227CNTR_ELEM(#name, \
1228 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1229 0, flags | CNTR_32BIT, \
1230 dev_access_u32_csr)
1231
1232/* DC */
1233#define DC_PERF_CNTR(name, counter, flags) \
1234CNTR_ELEM(#name, \
1235 counter, \
1236 0, \
1237 flags, \
1238 dev_access_u64_csr)
1239
1240#define DC_PERF_CNTR_LCB(name, counter, flags) \
1241CNTR_ELEM(#name, \
1242 counter, \
1243 0, \
1244 flags, \
1245 dc_access_lcb_cntr)
1246
1247/* ibp counters */
1248#define SW_IBP_CNTR(name, cntr) \
1249CNTR_ELEM(#name, \
1250 0, \
1251 0, \
1252 CNTR_SYNTH, \
1253 access_ibp_##cntr)
1254
1255u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1256{
1257 u64 val;
1258
1259 if (dd->flags & HFI1_PRESENT) {
1260 val = readq((void __iomem *)dd->kregbase + offset);
1261 return val;
1262 }
1263 return -1;
1264}
1265
1266void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1267{
1268 if (dd->flags & HFI1_PRESENT)
1269 writeq(value, (void __iomem *)dd->kregbase + offset);
1270}
1271
1272void __iomem *get_csr_addr(
1273 struct hfi1_devdata *dd,
1274 u32 offset)
1275{
1276 return (void __iomem *)dd->kregbase + offset;
1277}
1278
1279static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1280 int mode, u64 value)
1281{
1282 u64 ret;
1283
Mike Marciniszyn77241052015-07-30 15:17:43 -04001284 if (mode == CNTR_MODE_R) {
1285 ret = read_csr(dd, csr);
1286 } else if (mode == CNTR_MODE_W) {
1287 write_csr(dd, csr, value);
1288 ret = value;
1289 } else {
1290 dd_dev_err(dd, "Invalid cntr register access mode");
1291 return 0;
1292 }
1293
1294 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1295 return ret;
1296}
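/*
 * Illustrative sketch (not part of the driver): all CSR-backed counter
 * accessors below funnel through read_write_csr().  A read returns the
 * current CSR value, a write stores the caller's value, and anything
 * else is rejected with an error and reads as 0:
 *
 *	u64 v = read_write_csr(dd, csr, CNTR_MODE_R, 0);   // read
 *	read_write_csr(dd, csr, CNTR_MODE_W, 0);            // zero it
 *
 * The "value" argument only matters for CNTR_MODE_W.
 */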
1297
1298/* Dev Access */
1299static u64 dev_access_u32_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001300 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001301{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301302 struct hfi1_devdata *dd = context;
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001303 u64 csr = entry->csr;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001304
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001305 if (entry->flags & CNTR_SDMA) {
1306 if (vl == CNTR_INVALID_VL)
1307 return 0;
1308 csr += 0x100 * vl;
1309 } else {
1310 if (vl != CNTR_INVALID_VL)
1311 return 0;
1312 }
1313 return read_write_csr(dd, csr, mode, data);
1314}
1315
1316static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1317 void *context, int idx, int mode, u64 data)
1318{
1319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1320
1321 if (dd->per_sdma && idx < dd->num_sdma)
1322 return dd->per_sdma[idx].err_cnt;
1323 return 0;
1324}
1325
1326static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1327 void *context, int idx, int mode, u64 data)
1328{
1329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1330
1331 if (dd->per_sdma && idx < dd->num_sdma)
1332 return dd->per_sdma[idx].sdma_int_cnt;
1333 return 0;
1334}
1335
1336static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1337 void *context, int idx, int mode, u64 data)
1338{
1339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1340
1341 if (dd->per_sdma && idx < dd->num_sdma)
1342 return dd->per_sdma[idx].idle_int_cnt;
1343 return 0;
1344}
1345
1346static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1347 void *context, int idx, int mode,
1348 u64 data)
1349{
1350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1351
1352 if (dd->per_sdma && idx < dd->num_sdma)
1353 return dd->per_sdma[idx].progress_int_cnt;
1354 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001355}
1356
1357static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001358 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001359{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301360 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001361
1362 u64 val = 0;
1363 u64 csr = entry->csr;
1364
1365 if (entry->flags & CNTR_VL) {
1366 if (vl == CNTR_INVALID_VL)
1367 return 0;
1368 csr += 8 * vl;
1369 } else {
1370 if (vl != CNTR_INVALID_VL)
1371 return 0;
1372 }
1373
1374 val = read_write_csr(dd, csr, mode, data);
1375 return val;
1376}
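/*
 * Illustrative note (not part of the driver): per-VL and per-engine
 * counters are simply the base CSR plus a fixed stride.  For a CNTR_VL
 * entry with base "csr":
 *
 *	VL0 -> csr + 8 * 0
 *	VL1 -> csr + 8 * 1
 *	...
 *
 * while CNTR_SDMA entries in dev_access_u32_csr() stride 0x100 per
 * SDMA engine.  Entries without the matching flag only answer for
 * CNTR_INVALID_VL (the aggregate) and read as 0 for any real VL.
 */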
1377
1378static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001379 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001380{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301381 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001382 u32 csr = entry->csr;
1383 int ret = 0;
1384
1385 if (vl != CNTR_INVALID_VL)
1386 return 0;
1387 if (mode == CNTR_MODE_R)
1388 ret = read_lcb_csr(dd, csr, &data);
1389 else if (mode == CNTR_MODE_W)
1390 ret = write_lcb_csr(dd, csr, data);
1391
1392 if (ret) {
1393 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1394 return 0;
1395 }
1396
1397 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1398 return data;
1399}
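/*
 * Illustrative sketch (not part of the driver): LCB counters go through
 * read_lcb_csr()/write_lcb_csr(), which return non-zero when the LCB
 * cannot be acquired, so a caller pattern looks like:
 *
 *	u64 val;
 *
 *	if (read_lcb_csr(dd, csr, &val))
 *		return 0;	// LCB unavailable, report nothing
 */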
1400
1401/* Port Access */
1402static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001403 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001404{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301405 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001406
1407 if (vl != CNTR_INVALID_VL)
1408 return 0;
1409 return read_write_csr(ppd->dd, entry->csr, mode, data);
1410}
1411
1412static u64 port_access_u64_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001413 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001414{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301415 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001416 u64 val;
1417 u64 csr = entry->csr;
1418
1419 if (entry->flags & CNTR_VL) {
1420 if (vl == CNTR_INVALID_VL)
1421 return 0;
1422 csr += 8 * vl;
1423 } else {
1424 if (vl != CNTR_INVALID_VL)
1425 return 0;
1426 }
1427 val = read_write_csr(ppd->dd, csr, mode, data);
1428 return val;
1429}
1430
1431/* Software defined */
1432static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1433 u64 data)
1434{
1435 u64 ret;
1436
1437 if (mode == CNTR_MODE_R) {
1438 ret = *cntr;
1439 } else if (mode == CNTR_MODE_W) {
1440 *cntr = data;
1441 ret = data;
1442 } else {
1443 dd_dev_err(dd, "Invalid cntr sw access mode");
1444 return 0;
1445 }
1446
1447 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1448
1449 return ret;
1450}
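/*
 * Illustrative sketch (not part of the driver): software counters are
 * plain u64s in host memory, so read_write_sw() reduces to
 *
 *	read:  return *cntr;
 *	write: *cntr = data; return data;
 *
 * e.g. zeroing the link-down counter amounts to
 *
 *	read_write_sw(ppd->dd, &ppd->link_downed, CNTR_MODE_W, 0);
 */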
1451
1452static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001453 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001454{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301455 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001456
1457 if (vl != CNTR_INVALID_VL)
1458 return 0;
1459 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1460}
1461
1462static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001463 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001464{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301465 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001466
1467 if (vl != CNTR_INVALID_VL)
1468 return 0;
1469 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1470}
1471
Dean Luick6d014532015-12-01 15:38:23 -05001472static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1473 void *context, int vl, int mode,
1474 u64 data)
1475{
1476 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1477
1478 if (vl != CNTR_INVALID_VL)
1479 return 0;
1480 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1481}
1482
Mike Marciniszyn77241052015-07-30 15:17:43 -04001483static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001484 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001485{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001486 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1487 u64 zero = 0;
1488 u64 *counter;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001489
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001490 if (vl == CNTR_INVALID_VL)
1491 counter = &ppd->port_xmit_discards;
1492 else if (vl >= 0 && vl < C_VL_COUNT)
1493 counter = &ppd->port_xmit_discards_vl[vl];
1494 else
1495 counter = &zero;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001496
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001497 return read_write_sw(ppd->dd, counter, mode, data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001498}
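/*
 * Illustrative note (not part of the driver): the xmit-discard counter
 * is kept both as a port aggregate and per VL, so the one entry covers
 * three cases:
 *
 *	vl == CNTR_INVALID_VL     -> ppd->port_xmit_discards
 *	0 <= vl < C_VL_COUNT      -> ppd->port_xmit_discards_vl[vl]
 *	anything else             -> a throwaway zero (reads as 0)
 */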
1499
1500static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001501 void *context, int vl, int mode,
1502 u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001503{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301504 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001505
1506 if (vl != CNTR_INVALID_VL)
1507 return 0;
1508
1509 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1510 mode, data);
1511}
1512
1513static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001514 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001515{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301516 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001517
1518 if (vl != CNTR_INVALID_VL)
1519 return 0;
1520
1521 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1522 mode, data);
1523}
1524
1525u64 get_all_cpu_total(u64 __percpu *cntr)
1526{
1527 int cpu;
1528 u64 counter = 0;
1529
1530 for_each_possible_cpu(cpu)
1531 counter += *per_cpu_ptr(cntr, cpu);
1532 return counter;
1533}
1534
1535static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1536 u64 __percpu *cntr,
1537 int vl, int mode, u64 data)
1538{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001539 u64 ret = 0;
1540
1541 if (vl != CNTR_INVALID_VL)
1542 return 0;
1543
1544 if (mode == CNTR_MODE_R) {
1545 ret = get_all_cpu_total(cntr) - *z_val;
1546 } else if (mode == CNTR_MODE_W) {
1547 /* A write can only zero the counter */
1548 if (data == 0)
1549 *z_val = get_all_cpu_total(cntr);
1550 else
1551 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1552 } else {
1553 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1554 return 0;
1555 }
1556
1557 return ret;
1558}
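/*
 * Illustrative sketch (not part of the driver): per-CPU counters are
 * never reset in place; a "zero value" snapshot is kept instead.  With
 * per-CPU contributions of, say, 10 + 20 + 5 = 35:
 *
 *	read           -> 35 - *z_val
 *	write of 0     -> *z_val = 35 (subsequent reads start from 0)
 *	write of != 0  -> rejected, per-CPU counters can only be zeroed
 *
 * This keeps the hot-path increment lock-free while still letting the
 * counter be "cleared" from the slow path.
 */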
1559
1560static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1561 void *context, int vl, int mode, u64 data)
1562{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301563 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001564
1565 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1566 mode, data);
1567}
1568
1569static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001570 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001571{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301572 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001573
1574 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1575 mode, data);
1576}
1577
1578static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1579 void *context, int vl, int mode, u64 data)
1580{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301581 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001582
1583 return dd->verbs_dev.n_piowait;
1584}
1585
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001586static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1587 void *context, int vl, int mode, u64 data)
1588{
1589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1590
1591 return dd->verbs_dev.n_piodrain;
1592}
1593
Mike Marciniszyn77241052015-07-30 15:17:43 -04001594static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1595 void *context, int vl, int mode, u64 data)
1596{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301597 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001598
1599 return dd->verbs_dev.n_txwait;
1600}
1601
1602static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1603 void *context, int vl, int mode, u64 data)
1604{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301605 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001606
1607 return dd->verbs_dev.n_kmem_wait;
1608}
1609
Dean Luickb4219222015-10-26 10:28:35 -04001610static u64 access_sw_send_schedule(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001611 void *context, int vl, int mode, u64 data)
Dean Luickb4219222015-10-26 10:28:35 -04001612{
1613 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1614
Vennila Megavannan89abfc82016-02-03 14:34:07 -08001615 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1616 mode, data);
Dean Luickb4219222015-10-26 10:28:35 -04001617}
1618
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05001619/* Software counters for the error status bits within MISC_ERR_STATUS */
1620static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1621 void *context, int vl, int mode,
1622 u64 data)
1623{
1624 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1625
1626 return dd->misc_err_status_cnt[12];
1627}
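/*
 * Illustrative note (not part of the driver): each accessor in this
 * group reports misc_err_status_cnt[N], where N matches the bit
 * position of the error within MISC_ERR_STATUS, so the accessors run
 * from bit 12 (PLL lock fail) down to bit 0 (CSR parity).  They are
 * read-only views; nothing in this group modifies the counts.
 */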
1628
1629static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1630 void *context, int vl, int mode,
1631 u64 data)
1632{
1633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1634
1635 return dd->misc_err_status_cnt[11];
1636}
1637
1638static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1639 void *context, int vl, int mode,
1640 u64 data)
1641{
1642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1643
1644 return dd->misc_err_status_cnt[10];
1645}
1646
1647static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1648 void *context, int vl,
1649 int mode, u64 data)
1650{
1651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1652
1653 return dd->misc_err_status_cnt[9];
1654}
1655
1656static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1657 void *context, int vl, int mode,
1658 u64 data)
1659{
1660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1661
1662 return dd->misc_err_status_cnt[8];
1663}
1664
1665static u64 access_misc_efuse_read_bad_addr_err_cnt(
1666 const struct cntr_entry *entry,
1667 void *context, int vl, int mode, u64 data)
1668{
1669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1670
1671 return dd->misc_err_status_cnt[7];
1672}
1673
1674static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1675 void *context, int vl,
1676 int mode, u64 data)
1677{
1678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1679
1680 return dd->misc_err_status_cnt[6];
1681}
1682
1683static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1684 void *context, int vl, int mode,
1685 u64 data)
1686{
1687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1688
1689 return dd->misc_err_status_cnt[5];
1690}
1691
1692static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1693 void *context, int vl, int mode,
1694 u64 data)
1695{
1696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1697
1698 return dd->misc_err_status_cnt[4];
1699}
1700
1701static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1702 void *context, int vl,
1703 int mode, u64 data)
1704{
1705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1706
1707 return dd->misc_err_status_cnt[3];
1708}
1709
1710static u64 access_misc_csr_write_bad_addr_err_cnt(
1711 const struct cntr_entry *entry,
1712 void *context, int vl, int mode, u64 data)
1713{
1714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1715
1716 return dd->misc_err_status_cnt[2];
1717}
1718
1719static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1720 void *context, int vl,
1721 int mode, u64 data)
1722{
1723 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1724
1725 return dd->misc_err_status_cnt[1];
1726}
1727
1728static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1729 void *context, int vl, int mode,
1730 u64 data)
1731{
1732 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1733
1734 return dd->misc_err_status_cnt[0];
1735}
1736
1737/*
1738 * Software counter for the aggregate of
1739 * individual CceErrStatus counters
1740 */
1741static u64 access_sw_cce_err_status_aggregated_cnt(
1742 const struct cntr_entry *entry,
1743 void *context, int vl, int mode, u64 data)
1744{
1745 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1746
1747 return dd->sw_cce_err_status_aggregate;
1748}
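/*
 * Illustrative sketch (not part of the driver): the per-bit counters in
 * this group are only read here.  A hypothetical consumer of a
 * CceErrStatus value "reg" that kept the aggregate equal to the sum of
 * the individual counters would look roughly like:
 *
 *	for (i = 0; i < 41; i++) {
 *		if (reg & (1ull << i)) {
 *			dd->cce_err_status_cnt[i]++;
 *			dd->sw_cce_err_status_aggregate++;
 *		}
 *	}
 *
 * (Plain increments shown for clarity; the real error paths may use
 * their own helpers.)
 */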
1749
1750/*
1751 * Software counters corresponding to each of the
1752 * error status bits within CceErrStatus
1753 */
1754static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1755 void *context, int vl, int mode,
1756 u64 data)
1757{
1758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1759
1760 return dd->cce_err_status_cnt[40];
1761}
1762
1763static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1764 void *context, int vl, int mode,
1765 u64 data)
1766{
1767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1768
1769 return dd->cce_err_status_cnt[39];
1770}
1771
1772static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1773 void *context, int vl, int mode,
1774 u64 data)
1775{
1776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1777
1778 return dd->cce_err_status_cnt[38];
1779}
1780
1781static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1782 void *context, int vl, int mode,
1783 u64 data)
1784{
1785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1786
1787 return dd->cce_err_status_cnt[37];
1788}
1789
1790static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1791 void *context, int vl, int mode,
1792 u64 data)
1793{
1794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1795
1796 return dd->cce_err_status_cnt[36];
1797}
1798
1799static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1800 const struct cntr_entry *entry,
1801 void *context, int vl, int mode, u64 data)
1802{
1803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1804
1805 return dd->cce_err_status_cnt[35];
1806}
1807
1808static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1809 const struct cntr_entry *entry,
1810 void *context, int vl, int mode, u64 data)
1811{
1812 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1813
1814 return dd->cce_err_status_cnt[34];
1815}
1816
1817static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1818 void *context, int vl,
1819 int mode, u64 data)
1820{
1821 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1822
1823 return dd->cce_err_status_cnt[33];
1824}
1825
1826static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1827 void *context, int vl, int mode,
1828 u64 data)
1829{
1830 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1831
1832 return dd->cce_err_status_cnt[32];
1833}
1834
1835static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1836 void *context, int vl, int mode, u64 data)
1837{
1838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1839
1840 return dd->cce_err_status_cnt[31];
1841}
1842
1843static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1844 void *context, int vl, int mode,
1845 u64 data)
1846{
1847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1848
1849 return dd->cce_err_status_cnt[30];
1850}
1851
1852static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1853 void *context, int vl, int mode,
1854 u64 data)
1855{
1856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1857
1858 return dd->cce_err_status_cnt[29];
1859}
1860
1861static u64 access_pcic_transmit_back_parity_err_cnt(
1862 const struct cntr_entry *entry,
1863 void *context, int vl, int mode, u64 data)
1864{
1865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1866
1867 return dd->cce_err_status_cnt[28];
1868}
1869
1870static u64 access_pcic_transmit_front_parity_err_cnt(
1871 const struct cntr_entry *entry,
1872 void *context, int vl, int mode, u64 data)
1873{
1874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1875
1876 return dd->cce_err_status_cnt[27];
1877}
1878
1879static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1880 void *context, int vl, int mode,
1881 u64 data)
1882{
1883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1884
1885 return dd->cce_err_status_cnt[26];
1886}
1887
1888static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1889 void *context, int vl, int mode,
1890 u64 data)
1891{
1892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1893
1894 return dd->cce_err_status_cnt[25];
1895}
1896
1897static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1898 void *context, int vl, int mode,
1899 u64 data)
1900{
1901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1902
1903 return dd->cce_err_status_cnt[24];
1904}
1905
1906static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1907 void *context, int vl, int mode,
1908 u64 data)
1909{
1910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1911
1912 return dd->cce_err_status_cnt[23];
1913}
1914
1915static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1916 void *context, int vl,
1917 int mode, u64 data)
1918{
1919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1920
1921 return dd->cce_err_status_cnt[22];
1922}
1923
1924static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1925 void *context, int vl, int mode,
1926 u64 data)
1927{
1928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1929
1930 return dd->cce_err_status_cnt[21];
1931}
1932
1933static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1934 const struct cntr_entry *entry,
1935 void *context, int vl, int mode, u64 data)
1936{
1937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1938
1939 return dd->cce_err_status_cnt[20];
1940}
1941
1942static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1943 void *context, int vl,
1944 int mode, u64 data)
1945{
1946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1947
1948 return dd->cce_err_status_cnt[19];
1949}
1950
1951static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1952 void *context, int vl, int mode,
1953 u64 data)
1954{
1955 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1956
1957 return dd->cce_err_status_cnt[18];
1958}
1959
1960static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1961 void *context, int vl, int mode,
1962 u64 data)
1963{
1964 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1965
1966 return dd->cce_err_status_cnt[17];
1967}
1968
1969static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1970 void *context, int vl, int mode,
1971 u64 data)
1972{
1973 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1974
1975 return dd->cce_err_status_cnt[16];
1976}
1977
1978static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1979 void *context, int vl, int mode,
1980 u64 data)
1981{
1982 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1983
1984 return dd->cce_err_status_cnt[15];
1985}
1986
1987static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1988 void *context, int vl,
1989 int mode, u64 data)
1990{
1991 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1992
1993 return dd->cce_err_status_cnt[14];
1994}
1995
1996static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1997 void *context, int vl, int mode,
1998 u64 data)
1999{
2000 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2001
2002 return dd->cce_err_status_cnt[13];
2003}
2004
2005static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2006 const struct cntr_entry *entry,
2007 void *context, int vl, int mode, u64 data)
2008{
2009 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2010
2011 return dd->cce_err_status_cnt[12];
2012}
2013
2014static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2015 const struct cntr_entry *entry,
2016 void *context, int vl, int mode, u64 data)
2017{
2018 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2019
2020 return dd->cce_err_status_cnt[11];
2021}
2022
2023static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2024 const struct cntr_entry *entry,
2025 void *context, int vl, int mode, u64 data)
2026{
2027 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2028
2029 return dd->cce_err_status_cnt[10];
2030}
2031
2032static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2033 const struct cntr_entry *entry,
2034 void *context, int vl, int mode, u64 data)
2035{
2036 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2037
2038 return dd->cce_err_status_cnt[9];
2039}
2040
2041static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2042 const struct cntr_entry *entry,
2043 void *context, int vl, int mode, u64 data)
2044{
2045 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2046
2047 return dd->cce_err_status_cnt[8];
2048}
2049
2050static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2051 void *context, int vl,
2052 int mode, u64 data)
2053{
2054 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2055
2056 return dd->cce_err_status_cnt[7];
2057}
2058
2059static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2060 const struct cntr_entry *entry,
2061 void *context, int vl, int mode, u64 data)
2062{
2063 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2064
2065 return dd->cce_err_status_cnt[6];
2066}
2067
2068static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2069 void *context, int vl, int mode,
2070 u64 data)
2071{
2072 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2073
2074 return dd->cce_err_status_cnt[5];
2075}
2076
2077static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2078 void *context, int vl, int mode,
2079 u64 data)
2080{
2081 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2082
2083 return dd->cce_err_status_cnt[4];
2084}
2085
2086static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2087 const struct cntr_entry *entry,
2088 void *context, int vl, int mode, u64 data)
2089{
2090 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2091
2092 return dd->cce_err_status_cnt[3];
2093}
2094
2095static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2096 void *context, int vl,
2097 int mode, u64 data)
2098{
2099 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2100
2101 return dd->cce_err_status_cnt[2];
2102}
2103
2104static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2105 void *context, int vl,
2106 int mode, u64 data)
2107{
2108 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2109
2110 return dd->cce_err_status_cnt[1];
2111}
2112
2113static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2114 void *context, int vl, int mode,
2115 u64 data)
2116{
2117 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2118
2119 return dd->cce_err_status_cnt[0];
2120}
2121
2122/*
2123 * Software counters corresponding to each of the
2124 * error status bits within RcvErrStatus
2125 */
2126static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2127 void *context, int vl, int mode,
2128 u64 data)
2129{
2130 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131
2132 return dd->rcv_err_status_cnt[63];
2133}
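/*
 * Illustrative note (not part of the driver): as with the CCE group,
 * these accessors just expose rcv_err_status_cnt[N] for bit N of
 * RcvErrStatus, listed from bit 63 down to bit 0.  Nothing in this
 * group modifies the counts.
 */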
2134
2135static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2136 void *context, int vl,
2137 int mode, u64 data)
2138{
2139 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141 return dd->rcv_err_status_cnt[62];
2142}
2143
2144static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2145 void *context, int vl, int mode,
2146 u64 data)
2147{
2148 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150 return dd->rcv_err_status_cnt[61];
2151}
2152
2153static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2154 void *context, int vl, int mode,
2155 u64 data)
2156{
2157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159 return dd->rcv_err_status_cnt[60];
2160}
2161
2162static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2163 void *context, int vl,
2164 int mode, u64 data)
2165{
2166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168 return dd->rcv_err_status_cnt[59];
2169}
2170
2171static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2172 void *context, int vl,
2173 int mode, u64 data)
2174{
2175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177 return dd->rcv_err_status_cnt[58];
2178}
2179
2180static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2181 void *context, int vl, int mode,
2182 u64 data)
2183{
2184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186 return dd->rcv_err_status_cnt[57];
2187}
2188
2189static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2190 void *context, int vl, int mode,
2191 u64 data)
2192{
2193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195 return dd->rcv_err_status_cnt[56];
2196}
2197
2198static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2199 void *context, int vl, int mode,
2200 u64 data)
2201{
2202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204 return dd->rcv_err_status_cnt[55];
2205}
2206
2207static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2208 const struct cntr_entry *entry,
2209 void *context, int vl, int mode, u64 data)
2210{
2211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213 return dd->rcv_err_status_cnt[54];
2214}
2215
2216static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2217 const struct cntr_entry *entry,
2218 void *context, int vl, int mode, u64 data)
2219{
2220 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2221
2222 return dd->rcv_err_status_cnt[53];
2223}
2224
2225static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2226 void *context, int vl,
2227 int mode, u64 data)
2228{
2229 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2230
2231 return dd->rcv_err_status_cnt[52];
2232}
2233
2234static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2235 void *context, int vl,
2236 int mode, u64 data)
2237{
2238 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2239
2240 return dd->rcv_err_status_cnt[51];
2241}
2242
2243static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2244 void *context, int vl,
2245 int mode, u64 data)
2246{
2247 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2248
2249 return dd->rcv_err_status_cnt[50];
2250}
2251
2252static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2253 void *context, int vl,
2254 int mode, u64 data)
2255{
2256 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2257
2258 return dd->rcv_err_status_cnt[49];
2259}
2260
2261static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2262 void *context, int vl,
2263 int mode, u64 data)
2264{
2265 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2266
2267 return dd->rcv_err_status_cnt[48];
2268}
2269
2270static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2271 void *context, int vl,
2272 int mode, u64 data)
2273{
2274 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2275
2276 return dd->rcv_err_status_cnt[47];
2277}
2278
2279static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2280 void *context, int vl, int mode,
2281 u64 data)
2282{
2283 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2284
2285 return dd->rcv_err_status_cnt[46];
2286}
2287
2288static u64 access_rx_hq_intr_csr_parity_err_cnt(
2289 const struct cntr_entry *entry,
2290 void *context, int vl, int mode, u64 data)
2291{
2292 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2293
2294 return dd->rcv_err_status_cnt[45];
2295}
2296
2297static u64 access_rx_lookup_csr_parity_err_cnt(
2298 const struct cntr_entry *entry,
2299 void *context, int vl, int mode, u64 data)
2300{
2301 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2302
2303 return dd->rcv_err_status_cnt[44];
2304}
2305
2306static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2307 const struct cntr_entry *entry,
2308 void *context, int vl, int mode, u64 data)
2309{
2310 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2311
2312 return dd->rcv_err_status_cnt[43];
2313}
2314
2315static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2316 const struct cntr_entry *entry,
2317 void *context, int vl, int mode, u64 data)
2318{
2319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2320
2321 return dd->rcv_err_status_cnt[42];
2322}
2323
2324static u64 access_rx_lookup_des_part2_parity_err_cnt(
2325 const struct cntr_entry *entry,
2326 void *context, int vl, int mode, u64 data)
2327{
2328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2329
2330 return dd->rcv_err_status_cnt[41];
2331}
2332
2333static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2334 const struct cntr_entry *entry,
2335 void *context, int vl, int mode, u64 data)
2336{
2337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2338
2339 return dd->rcv_err_status_cnt[40];
2340}
2341
2342static u64 access_rx_lookup_des_part1_unc_err_cnt(
2343 const struct cntr_entry *entry,
2344 void *context, int vl, int mode, u64 data)
2345{
2346 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2347
2348 return dd->rcv_err_status_cnt[39];
2349}
2350
2351static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2352 const struct cntr_entry *entry,
2353 void *context, int vl, int mode, u64 data)
2354{
2355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2356
2357 return dd->rcv_err_status_cnt[38];
2358}
2359
2360static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2361 const struct cntr_entry *entry,
2362 void *context, int vl, int mode, u64 data)
2363{
2364 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2365
2366 return dd->rcv_err_status_cnt[37];
2367}
2368
2369static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2370 const struct cntr_entry *entry,
2371 void *context, int vl, int mode, u64 data)
2372{
2373 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2374
2375 return dd->rcv_err_status_cnt[36];
2376}
2377
2378static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2379 const struct cntr_entry *entry,
2380 void *context, int vl, int mode, u64 data)
2381{
2382 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2383
2384 return dd->rcv_err_status_cnt[35];
2385}
2386
2387static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2388 const struct cntr_entry *entry,
2389 void *context, int vl, int mode, u64 data)
2390{
2391 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2392
2393 return dd->rcv_err_status_cnt[34];
2394}
2395
2396static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2397 const struct cntr_entry *entry,
2398 void *context, int vl, int mode, u64 data)
2399{
2400 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2401
2402 return dd->rcv_err_status_cnt[33];
2403}
2404
2405static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2406 void *context, int vl, int mode,
2407 u64 data)
2408{
2409 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2410
2411 return dd->rcv_err_status_cnt[32];
2412}
2413
2414static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2415 void *context, int vl, int mode,
2416 u64 data)
2417{
2418 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2419
2420 return dd->rcv_err_status_cnt[31];
2421}
2422
2423static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2424 void *context, int vl, int mode,
2425 u64 data)
2426{
2427 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2428
2429 return dd->rcv_err_status_cnt[30];
2430}
2431
2432static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2433 void *context, int vl, int mode,
2434 u64 data)
2435{
2436 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2437
2438 return dd->rcv_err_status_cnt[29];
2439}
2440
2441static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2442 void *context, int vl,
2443 int mode, u64 data)
2444{
2445 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2446
2447 return dd->rcv_err_status_cnt[28];
2448}
2449
2450static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2451 const struct cntr_entry *entry,
2452 void *context, int vl, int mode, u64 data)
2453{
2454 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2455
2456 return dd->rcv_err_status_cnt[27];
2457}
2458
2459static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2460 const struct cntr_entry *entry,
2461 void *context, int vl, int mode, u64 data)
2462{
2463 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2464
2465 return dd->rcv_err_status_cnt[26];
2466}
2467
2468static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2469 const struct cntr_entry *entry,
2470 void *context, int vl, int mode, u64 data)
2471{
2472 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2473
2474 return dd->rcv_err_status_cnt[25];
2475}
2476
2477static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2478 const struct cntr_entry *entry,
2479 void *context, int vl, int mode, u64 data)
2480{
2481 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2482
2483 return dd->rcv_err_status_cnt[24];
2484}
2485
2486static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2487 const struct cntr_entry *entry,
2488 void *context, int vl, int mode, u64 data)
2489{
2490 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2491
2492 return dd->rcv_err_status_cnt[23];
2493}
2494
2495static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2496 const struct cntr_entry *entry,
2497 void *context, int vl, int mode, u64 data)
2498{
2499 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2500
2501 return dd->rcv_err_status_cnt[22];
2502}
2503
2504static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2505 const struct cntr_entry *entry,
2506 void *context, int vl, int mode, u64 data)
2507{
2508 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2509
2510 return dd->rcv_err_status_cnt[21];
2511}
2512
2513static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2514 const struct cntr_entry *entry,
2515 void *context, int vl, int mode, u64 data)
2516{
2517 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2518
2519 return dd->rcv_err_status_cnt[20];
2520}
2521
2522static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2523 const struct cntr_entry *entry,
2524 void *context, int vl, int mode, u64 data)
2525{
2526 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2527
2528 return dd->rcv_err_status_cnt[19];
2529}
2530
2531static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2532 void *context, int vl,
2533 int mode, u64 data)
2534{
2535 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2536
2537 return dd->rcv_err_status_cnt[18];
2538}
2539
2540static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2541 void *context, int vl,
2542 int mode, u64 data)
2543{
2544 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2545
2546 return dd->rcv_err_status_cnt[17];
2547}
2548
2549static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2550 const struct cntr_entry *entry,
2551 void *context, int vl, int mode, u64 data)
2552{
2553 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2554
2555 return dd->rcv_err_status_cnt[16];
2556}
2557
2558static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2559 const struct cntr_entry *entry,
2560 void *context, int vl, int mode, u64 data)
2561{
2562 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2563
2564 return dd->rcv_err_status_cnt[15];
2565}
2566
2567static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2568 void *context, int vl,
2569 int mode, u64 data)
2570{
2571 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2572
2573 return dd->rcv_err_status_cnt[14];
2574}
2575
2576static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2577 void *context, int vl,
2578 int mode, u64 data)
2579{
2580 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2581
2582 return dd->rcv_err_status_cnt[13];
2583}
2584
2585static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2586 void *context, int vl, int mode,
2587 u64 data)
2588{
2589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2590
2591 return dd->rcv_err_status_cnt[12];
2592}
2593
2594static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2595 void *context, int vl, int mode,
2596 u64 data)
2597{
2598 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2599
2600 return dd->rcv_err_status_cnt[11];
2601}
2602
2603static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2604 void *context, int vl, int mode,
2605 u64 data)
2606{
2607 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2608
2609 return dd->rcv_err_status_cnt[10];
2610}
2611
2612static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2613 void *context, int vl, int mode,
2614 u64 data)
2615{
2616 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2617
2618 return dd->rcv_err_status_cnt[9];
2619}
2620
2621static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2622 void *context, int vl, int mode,
2623 u64 data)
2624{
2625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2626
2627 return dd->rcv_err_status_cnt[8];
2628}
2629
2630static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2631 const struct cntr_entry *entry,
2632 void *context, int vl, int mode, u64 data)
2633{
2634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2635
2636 return dd->rcv_err_status_cnt[7];
2637}
2638
2639static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2640 const struct cntr_entry *entry,
2641 void *context, int vl, int mode, u64 data)
2642{
2643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2644
2645 return dd->rcv_err_status_cnt[6];
2646}
2647
2648static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2649 void *context, int vl, int mode,
2650 u64 data)
2651{
2652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2653
2654 return dd->rcv_err_status_cnt[5];
2655}
2656
2657static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2658 void *context, int vl, int mode,
2659 u64 data)
2660{
2661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2662
2663 return dd->rcv_err_status_cnt[4];
2664}
2665
2666static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2667 void *context, int vl, int mode,
2668 u64 data)
2669{
2670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2671
2672 return dd->rcv_err_status_cnt[3];
2673}
2674
2675static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2676 void *context, int vl, int mode,
2677 u64 data)
2678{
2679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2680
2681 return dd->rcv_err_status_cnt[2];
2682}
2683
2684static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2685 void *context, int vl, int mode,
2686 u64 data)
2687{
2688 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2689
2690 return dd->rcv_err_status_cnt[1];
2691}
2692
2693static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2694 void *context, int vl, int mode,
2695 u64 data)
2696{
2697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2698
2699 return dd->rcv_err_status_cnt[0];
2700}
2701
2702/*
2703 * Software counters corresponding to each of the
2704 * error status bits within SendPioErrStatus
2705 */
2706static u64 access_pio_pec_sop_head_parity_err_cnt(
2707 const struct cntr_entry *entry,
2708 void *context, int vl, int mode, u64 data)
2709{
2710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711
2712 return dd->send_pio_err_status_cnt[35];
2713}
2714
2715static u64 access_pio_pcc_sop_head_parity_err_cnt(
2716 const struct cntr_entry *entry,
2717 void *context, int vl, int mode, u64 data)
2718{
2719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721 return dd->send_pio_err_status_cnt[34];
2722}
2723
2724static u64 access_pio_last_returned_cnt_parity_err_cnt(
2725 const struct cntr_entry *entry,
2726 void *context, int vl, int mode, u64 data)
2727{
2728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730 return dd->send_pio_err_status_cnt[33];
2731}
2732
2733static u64 access_pio_current_free_cnt_parity_err_cnt(
2734 const struct cntr_entry *entry,
2735 void *context, int vl, int mode, u64 data)
2736{
2737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739 return dd->send_pio_err_status_cnt[32];
2740}
2741
2742static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2743 void *context, int vl, int mode,
2744 u64 data)
2745{
2746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748 return dd->send_pio_err_status_cnt[31];
2749}
2750
2751static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2752 void *context, int vl, int mode,
2753 u64 data)
2754{
2755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757 return dd->send_pio_err_status_cnt[30];
2758}
2759
2760static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2761 void *context, int vl, int mode,
2762 u64 data)
2763{
2764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766 return dd->send_pio_err_status_cnt[29];
2767}
2768
2769static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2770 const struct cntr_entry *entry,
2771 void *context, int vl, int mode, u64 data)
2772{
2773 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775 return dd->send_pio_err_status_cnt[28];
2776}
2777
2778static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2779 void *context, int vl, int mode,
2780 u64 data)
2781{
2782 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784 return dd->send_pio_err_status_cnt[27];
2785}
2786
2787static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2788 void *context, int vl, int mode,
2789 u64 data)
2790{
2791 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793 return dd->send_pio_err_status_cnt[26];
2794}
2795
2796static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2797 void *context, int vl,
2798 int mode, u64 data)
2799{
2800 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2801
2802 return dd->send_pio_err_status_cnt[25];
2803}
2804
2805static u64 access_pio_block_qw_count_parity_err_cnt(
2806 const struct cntr_entry *entry,
2807 void *context, int vl, int mode, u64 data)
2808{
2809 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2810
2811 return dd->send_pio_err_status_cnt[24];
2812}
2813
2814static u64 access_pio_write_qw_valid_parity_err_cnt(
2815 const struct cntr_entry *entry,
2816 void *context, int vl, int mode, u64 data)
2817{
2818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2819
2820 return dd->send_pio_err_status_cnt[23];
2821}
2822
2823static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2824 void *context, int vl, int mode,
2825 u64 data)
2826{
2827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2828
2829 return dd->send_pio_err_status_cnt[22];
2830}
2831
2832static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2833 void *context, int vl,
2834 int mode, u64 data)
2835{
2836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2837
2838 return dd->send_pio_err_status_cnt[21];
2839}
2840
2841static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2842 void *context, int vl,
2843 int mode, u64 data)
2844{
2845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2846
2847 return dd->send_pio_err_status_cnt[20];
2848}
2849
2850static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2851 void *context, int vl,
2852 int mode, u64 data)
2853{
2854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2855
2856 return dd->send_pio_err_status_cnt[19];
2857}
2858
2859static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2860 const struct cntr_entry *entry,
2861 void *context, int vl, int mode, u64 data)
2862{
2863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2864
2865 return dd->send_pio_err_status_cnt[18];
2866}
2867
2868static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2869 void *context, int vl, int mode,
2870 u64 data)
2871{
2872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2873
2874 return dd->send_pio_err_status_cnt[17];
2875}
2876
2877static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2878 void *context, int vl, int mode,
2879 u64 data)
2880{
2881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2882
2883 return dd->send_pio_err_status_cnt[16];
2884}
2885
2886static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2887 const struct cntr_entry *entry,
2888 void *context, int vl, int mode, u64 data)
2889{
2890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2891
2892 return dd->send_pio_err_status_cnt[15];
2893}
2894
2895static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2896 const struct cntr_entry *entry,
2897 void *context, int vl, int mode, u64 data)
2898{
2899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2900
2901 return dd->send_pio_err_status_cnt[14];
2902}
2903
2904static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2905 const struct cntr_entry *entry,
2906 void *context, int vl, int mode, u64 data)
2907{
2908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2909
2910 return dd->send_pio_err_status_cnt[13];
2911}
2912
2913static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2914 const struct cntr_entry *entry,
2915 void *context, int vl, int mode, u64 data)
2916{
2917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2918
2919 return dd->send_pio_err_status_cnt[12];
2920}
2921
2922static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2923 const struct cntr_entry *entry,
2924 void *context, int vl, int mode, u64 data)
2925{
2926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2927
2928 return dd->send_pio_err_status_cnt[11];
2929}
2930
2931static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2932 const struct cntr_entry *entry,
2933 void *context, int vl, int mode, u64 data)
2934{
2935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2936
2937 return dd->send_pio_err_status_cnt[10];
2938}
2939
2940static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2941 const struct cntr_entry *entry,
2942 void *context, int vl, int mode, u64 data)
2943{
2944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2945
2946 return dd->send_pio_err_status_cnt[9];
2947}
2948
2949static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2950 const struct cntr_entry *entry,
2951 void *context, int vl, int mode, u64 data)
2952{
2953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2954
2955 return dd->send_pio_err_status_cnt[8];
2956}
2957
2958static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2959 const struct cntr_entry *entry,
2960 void *context, int vl, int mode, u64 data)
2961{
2962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2963
2964 return dd->send_pio_err_status_cnt[7];
2965}
2966
2967static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2968 void *context, int vl, int mode,
2969 u64 data)
2970{
2971 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2972
2973 return dd->send_pio_err_status_cnt[6];
2974}
2975
2976static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2977 void *context, int vl, int mode,
2978 u64 data)
2979{
2980 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2981
2982 return dd->send_pio_err_status_cnt[5];
2983}
2984
2985static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2986 void *context, int vl, int mode,
2987 u64 data)
2988{
2989 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2990
2991 return dd->send_pio_err_status_cnt[4];
2992}
2993
2994static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2995 void *context, int vl, int mode,
2996 u64 data)
2997{
2998 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2999
3000 return dd->send_pio_err_status_cnt[3];
3001}
3002
3003static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3004 void *context, int vl, int mode,
3005 u64 data)
3006{
3007 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3008
3009 return dd->send_pio_err_status_cnt[2];
3010}
3011
3012static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3013 void *context, int vl,
3014 int mode, u64 data)
3015{
3016 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3017
3018 return dd->send_pio_err_status_cnt[1];
3019}
3020
3021static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3022 void *context, int vl, int mode,
3023 u64 data)
3024{
3025 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3026
3027 return dd->send_pio_err_status_cnt[0];
3028}
3029
3030/*
3031 * Software counters corresponding to each of the
3032 * error status bits within SendDmaErrStatus
3033 */
3034static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3035 const struct cntr_entry *entry,
3036 void *context, int vl, int mode, u64 data)
3037{
3038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039
3040 return dd->send_dma_err_status_cnt[3];
3041}
3042
3043static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3044 const struct cntr_entry *entry,
3045 void *context, int vl, int mode, u64 data)
3046{
3047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049 return dd->send_dma_err_status_cnt[2];
3050}
3051
3052static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3053 void *context, int vl, int mode,
3054 u64 data)
3055{
3056 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057
3058 return dd->send_dma_err_status_cnt[1];
3059}
3060
3061static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3062 void *context, int vl, int mode,
3063 u64 data)
3064{
3065 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066
3067 return dd->send_dma_err_status_cnt[0];
3068}
3069
3070/*
3071 * Software counters corresponding to each of the
3072 * error status bits within SendEgressErrStatus
3073 */
3074static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3075 const struct cntr_entry *entry,
3076 void *context, int vl, int mode, u64 data)
3077{
3078 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3079
3080 return dd->send_egress_err_status_cnt[63];
3081}
3082
3083static u64 access_tx_read_sdma_memory_csr_err_cnt(
3084 const struct cntr_entry *entry,
3085 void *context, int vl, int mode, u64 data)
3086{
3087 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3088
3089 return dd->send_egress_err_status_cnt[62];
3090}
3091
3092static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3093 void *context, int vl, int mode,
3094 u64 data)
3095{
3096 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3097
3098 return dd->send_egress_err_status_cnt[61];
3099}
3100
3101static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3102 void *context, int vl,
3103 int mode, u64 data)
3104{
3105 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3106
3107 return dd->send_egress_err_status_cnt[60];
3108}
3109
3110static u64 access_tx_read_sdma_memory_cor_err_cnt(
3111 const struct cntr_entry *entry,
3112 void *context, int vl, int mode, u64 data)
3113{
3114 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3115
3116 return dd->send_egress_err_status_cnt[59];
3117}
3118
3119static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3120 void *context, int vl, int mode,
3121 u64 data)
3122{
3123 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3124
3125 return dd->send_egress_err_status_cnt[58];
3126}
3127
3128static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3129 void *context, int vl, int mode,
3130 u64 data)
3131{
3132 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134 return dd->send_egress_err_status_cnt[57];
3135}
3136
3137static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3138 void *context, int vl, int mode,
3139 u64 data)
3140{
3141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143 return dd->send_egress_err_status_cnt[56];
3144}
3145
3146static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3147 void *context, int vl, int mode,
3148 u64 data)
3149{
3150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152 return dd->send_egress_err_status_cnt[55];
3153}
3154
3155static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3156 void *context, int vl, int mode,
3157 u64 data)
3158{
3159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161 return dd->send_egress_err_status_cnt[54];
3162}
3163
3164static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3165 void *context, int vl, int mode,
3166 u64 data)
3167{
3168 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3169
3170 return dd->send_egress_err_status_cnt[53];
3171}
3172
3173static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3174 void *context, int vl, int mode,
3175 u64 data)
3176{
3177 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3178
3179 return dd->send_egress_err_status_cnt[52];
3180}
3181
3182static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3183 void *context, int vl, int mode,
3184 u64 data)
3185{
3186 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3187
3188 return dd->send_egress_err_status_cnt[51];
3189}
3190
3191static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3192 void *context, int vl, int mode,
3193 u64 data)
3194{
3195 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3196
3197 return dd->send_egress_err_status_cnt[50];
3198}
3199
3200static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3201 void *context, int vl, int mode,
3202 u64 data)
3203{
3204 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3205
3206 return dd->send_egress_err_status_cnt[49];
3207}
3208
3209static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3210 void *context, int vl, int mode,
3211 u64 data)
3212{
3213 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3214
3215 return dd->send_egress_err_status_cnt[48];
3216}
3217
3218static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3219 void *context, int vl, int mode,
3220 u64 data)
3221{
3222 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3223
3224 return dd->send_egress_err_status_cnt[47];
3225}
3226
3227static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3228 void *context, int vl, int mode,
3229 u64 data)
3230{
3231 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3232
3233 return dd->send_egress_err_status_cnt[46];
3234}
3235
3236static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3237 void *context, int vl, int mode,
3238 u64 data)
3239{
3240 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3241
3242 return dd->send_egress_err_status_cnt[45];
3243}
3244
3245static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3246 void *context, int vl,
3247 int mode, u64 data)
3248{
3249 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3250
3251 return dd->send_egress_err_status_cnt[44];
3252}
3253
3254static u64 access_tx_read_sdma_memory_unc_err_cnt(
3255 const struct cntr_entry *entry,
3256 void *context, int vl, int mode, u64 data)
3257{
3258 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3259
3260 return dd->send_egress_err_status_cnt[43];
3261}
3262
3263static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3264 void *context, int vl, int mode,
3265 u64 data)
3266{
3267 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3268
3269 return dd->send_egress_err_status_cnt[42];
3270}
3271
3272static u64 access_tx_credit_return_partiy_err_cnt(
3273 const struct cntr_entry *entry,
3274 void *context, int vl, int mode, u64 data)
3275{
3276 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3277
3278 return dd->send_egress_err_status_cnt[41];
3279}
3280
3281static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3282 const struct cntr_entry *entry,
3283 void *context, int vl, int mode, u64 data)
3284{
3285 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3286
3287 return dd->send_egress_err_status_cnt[40];
3288}
3289
3290static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3291 const struct cntr_entry *entry,
3292 void *context, int vl, int mode, u64 data)
3293{
3294 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3295
3296 return dd->send_egress_err_status_cnt[39];
3297}
3298
3299static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3300 const struct cntr_entry *entry,
3301 void *context, int vl, int mode, u64 data)
3302{
3303 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3304
3305 return dd->send_egress_err_status_cnt[38];
3306}
3307
3308static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3309 const struct cntr_entry *entry,
3310 void *context, int vl, int mode, u64 data)
3311{
3312 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3313
3314 return dd->send_egress_err_status_cnt[37];
3315}
3316
3317static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3318 const struct cntr_entry *entry,
3319 void *context, int vl, int mode, u64 data)
3320{
3321 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3322
3323 return dd->send_egress_err_status_cnt[36];
3324}
3325
3326static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3327 const struct cntr_entry *entry,
3328 void *context, int vl, int mode, u64 data)
3329{
3330 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3331
3332 return dd->send_egress_err_status_cnt[35];
3333}
3334
3335static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3336 const struct cntr_entry *entry,
3337 void *context, int vl, int mode, u64 data)
3338{
3339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3340
3341 return dd->send_egress_err_status_cnt[34];
3342}
3343
3344static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3345 const struct cntr_entry *entry,
3346 void *context, int vl, int mode, u64 data)
3347{
3348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3349
3350 return dd->send_egress_err_status_cnt[33];
3351}
3352
3353static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3354 const struct cntr_entry *entry,
3355 void *context, int vl, int mode, u64 data)
3356{
3357 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3358
3359 return dd->send_egress_err_status_cnt[32];
3360}
3361
3362static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3363 const struct cntr_entry *entry,
3364 void *context, int vl, int mode, u64 data)
3365{
3366 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3367
3368 return dd->send_egress_err_status_cnt[31];
3369}
3370
3371static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3372 const struct cntr_entry *entry,
3373 void *context, int vl, int mode, u64 data)
3374{
3375 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3376
3377 return dd->send_egress_err_status_cnt[30];
3378}
3379
3380static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3381 const struct cntr_entry *entry,
3382 void *context, int vl, int mode, u64 data)
3383{
3384 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3385
3386 return dd->send_egress_err_status_cnt[29];
3387}
3388
3389static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3390 const struct cntr_entry *entry,
3391 void *context, int vl, int mode, u64 data)
3392{
3393 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3394
3395 return dd->send_egress_err_status_cnt[28];
3396}
3397
3398static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3399 const struct cntr_entry *entry,
3400 void *context, int vl, int mode, u64 data)
3401{
3402 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3403
3404 return dd->send_egress_err_status_cnt[27];
3405}
3406
3407static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3408 const struct cntr_entry *entry,
3409 void *context, int vl, int mode, u64 data)
3410{
3411 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3412
3413 return dd->send_egress_err_status_cnt[26];
3414}
3415
3416static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3417 const struct cntr_entry *entry,
3418 void *context, int vl, int mode, u64 data)
3419{
3420 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3421
3422 return dd->send_egress_err_status_cnt[25];
3423}
3424
3425static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3426 const struct cntr_entry *entry,
3427 void *context, int vl, int mode, u64 data)
3428{
3429 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3430
3431 return dd->send_egress_err_status_cnt[24];
3432}
3433
3434static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3435 const struct cntr_entry *entry,
3436 void *context, int vl, int mode, u64 data)
3437{
3438 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3439
3440 return dd->send_egress_err_status_cnt[23];
3441}
3442
3443static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3444 const struct cntr_entry *entry,
3445 void *context, int vl, int mode, u64 data)
3446{
3447 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3448
3449 return dd->send_egress_err_status_cnt[22];
3450}
3451
3452static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3453 const struct cntr_entry *entry,
3454 void *context, int vl, int mode, u64 data)
3455{
3456 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3457
3458 return dd->send_egress_err_status_cnt[21];
3459}
3460
3461static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3462 const struct cntr_entry *entry,
3463 void *context, int vl, int mode, u64 data)
3464{
3465 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3466
3467 return dd->send_egress_err_status_cnt[20];
3468}
3469
3470static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3471 const struct cntr_entry *entry,
3472 void *context, int vl, int mode, u64 data)
3473{
3474 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3475
3476 return dd->send_egress_err_status_cnt[19];
3477}
3478
3479static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3480 const struct cntr_entry *entry,
3481 void *context, int vl, int mode, u64 data)
3482{
3483 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3484
3485 return dd->send_egress_err_status_cnt[18];
3486}
3487
3488static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3489 const struct cntr_entry *entry,
3490 void *context, int vl, int mode, u64 data)
3491{
3492 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3493
3494 return dd->send_egress_err_status_cnt[17];
3495}
3496
3497static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3498 const struct cntr_entry *entry,
3499 void *context, int vl, int mode, u64 data)
3500{
3501 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3502
3503 return dd->send_egress_err_status_cnt[16];
3504}
3505
3506static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3507 void *context, int vl, int mode,
3508 u64 data)
3509{
3510 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3511
3512 return dd->send_egress_err_status_cnt[15];
3513}
3514
3515static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3516 void *context, int vl,
3517 int mode, u64 data)
3518{
3519 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3520
3521 return dd->send_egress_err_status_cnt[14];
3522}
3523
3524static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3525 void *context, int vl, int mode,
3526 u64 data)
3527{
3528 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3529
3530 return dd->send_egress_err_status_cnt[13];
3531}
3532
3533static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3534 void *context, int vl, int mode,
3535 u64 data)
3536{
3537 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3538
3539 return dd->send_egress_err_status_cnt[12];
3540}
3541
3542static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3543 const struct cntr_entry *entry,
3544 void *context, int vl, int mode, u64 data)
3545{
3546 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3547
3548 return dd->send_egress_err_status_cnt[11];
3549}
3550
3551static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3552 void *context, int vl, int mode,
3553 u64 data)
3554{
3555 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3556
3557 return dd->send_egress_err_status_cnt[10];
3558}
3559
3560static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3561 void *context, int vl, int mode,
3562 u64 data)
3563{
3564 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3565
3566 return dd->send_egress_err_status_cnt[9];
3567}
3568
3569static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3570 const struct cntr_entry *entry,
3571 void *context, int vl, int mode, u64 data)
3572{
3573 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3574
3575 return dd->send_egress_err_status_cnt[8];
3576}
3577
3578static u64 access_tx_pio_launch_intf_parity_err_cnt(
3579 const struct cntr_entry *entry,
3580 void *context, int vl, int mode, u64 data)
3581{
3582 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3583
3584 return dd->send_egress_err_status_cnt[7];
3585}
3586
3587static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3588 void *context, int vl, int mode,
3589 u64 data)
3590{
3591 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3592
3593 return dd->send_egress_err_status_cnt[6];
3594}
3595
3596static u64 access_tx_incorrect_link_state_err_cnt(
3597 const struct cntr_entry *entry,
3598 void *context, int vl, int mode, u64 data)
3599{
3600 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3601
3602 return dd->send_egress_err_status_cnt[5];
3603}
3604
3605static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3606 void *context, int vl, int mode,
3607 u64 data)
3608{
3609 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3610
3611 return dd->send_egress_err_status_cnt[4];
3612}
3613
3614static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3615 const struct cntr_entry *entry,
3616 void *context, int vl, int mode, u64 data)
3617{
3618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3619
3620 return dd->send_egress_err_status_cnt[3];
3621}
3622
3623static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3624 void *context, int vl, int mode,
3625 u64 data)
3626{
3627 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3628
3629 return dd->send_egress_err_status_cnt[2];
3630}
3631
3632static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3633 const struct cntr_entry *entry,
3634 void *context, int vl, int mode, u64 data)
3635{
3636 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3637
3638 return dd->send_egress_err_status_cnt[1];
3639}
3640
3641static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3642 const struct cntr_entry *entry,
3643 void *context, int vl, int mode, u64 data)
3644{
3645 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3646
3647 return dd->send_egress_err_status_cnt[0];
3648}
3649
3650/*
3651 * Software counters corresponding to each of the
3652 * error status bits within SendErrStatus
3653 */
3654static u64 access_send_csr_write_bad_addr_err_cnt(
3655 const struct cntr_entry *entry,
3656 void *context, int vl, int mode, u64 data)
3657{
3658 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659
3660 return dd->send_err_status_cnt[2];
3661}
3662
3663static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3664 void *context, int vl,
3665 int mode, u64 data)
3666{
3667 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668
3669 return dd->send_err_status_cnt[1];
3670}
3671
3672static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3673 void *context, int vl, int mode,
3674 u64 data)
3675{
3676 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677
3678 return dd->send_err_status_cnt[0];
3679}
3680
3681/*
3682 * Software counters corresponding to each of the
3683 * error status bits within SendCtxtErrStatus
3684 */
3685static u64 access_pio_write_out_of_bounds_err_cnt(
3686 const struct cntr_entry *entry,
3687 void *context, int vl, int mode, u64 data)
3688{
3689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3690
3691 return dd->sw_ctxt_err_status_cnt[4];
3692}
3693
3694static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3695 void *context, int vl, int mode,
3696 u64 data)
3697{
3698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3699
3700 return dd->sw_ctxt_err_status_cnt[3];
3701}
3702
3703static u64 access_pio_write_crosses_boundary_err_cnt(
3704 const struct cntr_entry *entry,
3705 void *context, int vl, int mode, u64 data)
3706{
3707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3708
3709 return dd->sw_ctxt_err_status_cnt[2];
3710}
3711
3712static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3713 void *context, int vl,
3714 int mode, u64 data)
3715{
3716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3717
3718 return dd->sw_ctxt_err_status_cnt[1];
3719}
3720
3721static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3722 void *context, int vl, int mode,
3723 u64 data)
3724{
3725 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3726
3727 return dd->sw_ctxt_err_status_cnt[0];
3728}
3729
3730/*
3731 * Software counters corresponding to each of the
3732 * error status bits within SendDmaEngErrStatus
3733 */
3734static u64 access_sdma_header_request_fifo_cor_err_cnt(
3735 const struct cntr_entry *entry,
3736 void *context, int vl, int mode, u64 data)
3737{
3738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3739
3740 return dd->sw_send_dma_eng_err_status_cnt[23];
3741}
3742
3743static u64 access_sdma_header_storage_cor_err_cnt(
3744 const struct cntr_entry *entry,
3745 void *context, int vl, int mode, u64 data)
3746{
3747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3748
3749 return dd->sw_send_dma_eng_err_status_cnt[22];
3750}
3751
3752static u64 access_sdma_packet_tracking_cor_err_cnt(
3753 const struct cntr_entry *entry,
3754 void *context, int vl, int mode, u64 data)
3755{
3756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3757
3758 return dd->sw_send_dma_eng_err_status_cnt[21];
3759}
3760
3761static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3762 void *context, int vl, int mode,
3763 u64 data)
3764{
3765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3766
3767 return dd->sw_send_dma_eng_err_status_cnt[20];
3768}
3769
3770static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3771 void *context, int vl, int mode,
3772 u64 data)
3773{
3774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3775
3776 return dd->sw_send_dma_eng_err_status_cnt[19];
3777}
3778
3779static u64 access_sdma_header_request_fifo_unc_err_cnt(
3780 const struct cntr_entry *entry,
3781 void *context, int vl, int mode, u64 data)
3782{
3783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785 return dd->sw_send_dma_eng_err_status_cnt[18];
3786}
3787
3788static u64 access_sdma_header_storage_unc_err_cnt(
3789 const struct cntr_entry *entry,
3790 void *context, int vl, int mode, u64 data)
3791{
3792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794 return dd->sw_send_dma_eng_err_status_cnt[17];
3795}
3796
3797static u64 access_sdma_packet_tracking_unc_err_cnt(
3798 const struct cntr_entry *entry,
3799 void *context, int vl, int mode, u64 data)
3800{
3801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803 return dd->sw_send_dma_eng_err_status_cnt[16];
3804}
3805
3806static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3807 void *context, int vl, int mode,
3808 u64 data)
3809{
3810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812 return dd->sw_send_dma_eng_err_status_cnt[15];
3813}
3814
3815static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3816 void *context, int vl, int mode,
3817 u64 data)
3818{
3819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821 return dd->sw_send_dma_eng_err_status_cnt[14];
3822}
3823
3824static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3825 void *context, int vl, int mode,
3826 u64 data)
3827{
3828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3829
3830 return dd->sw_send_dma_eng_err_status_cnt[13];
3831}
3832
3833static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3834 void *context, int vl, int mode,
3835 u64 data)
3836{
3837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3838
3839 return dd->sw_send_dma_eng_err_status_cnt[12];
3840}
3841
3842static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3843 void *context, int vl, int mode,
3844 u64 data)
3845{
3846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3847
3848 return dd->sw_send_dma_eng_err_status_cnt[11];
3849}
3850
3851static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3852 void *context, int vl, int mode,
3853 u64 data)
3854{
3855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3856
3857 return dd->sw_send_dma_eng_err_status_cnt[10];
3858}
3859
3860static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3861 void *context, int vl, int mode,
3862 u64 data)
3863{
3864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3865
3866 return dd->sw_send_dma_eng_err_status_cnt[9];
3867}
3868
3869static u64 access_sdma_packet_desc_overflow_err_cnt(
3870 const struct cntr_entry *entry,
3871 void *context, int vl, int mode, u64 data)
3872{
3873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3874
3875 return dd->sw_send_dma_eng_err_status_cnt[8];
3876}
3877
3878static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3879 void *context, int vl,
3880 int mode, u64 data)
3881{
3882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3883
3884 return dd->sw_send_dma_eng_err_status_cnt[7];
3885}
3886
3887static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3888 void *context, int vl, int mode, u64 data)
3889{
3890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3891
3892 return dd->sw_send_dma_eng_err_status_cnt[6];
3893}
3894
3895static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3896 void *context, int vl, int mode,
3897 u64 data)
3898{
3899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3900
3901 return dd->sw_send_dma_eng_err_status_cnt[5];
3902}
3903
3904static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3905 void *context, int vl, int mode,
3906 u64 data)
3907{
3908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3909
3910 return dd->sw_send_dma_eng_err_status_cnt[4];
3911}
3912
3913static u64 access_sdma_tail_out_of_bounds_err_cnt(
3914 const struct cntr_entry *entry,
3915 void *context, int vl, int mode, u64 data)
3916{
3917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3918
3919 return dd->sw_send_dma_eng_err_status_cnt[3];
3920}
3921
3922static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3923 void *context, int vl, int mode,
3924 u64 data)
3925{
3926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3927
3928 return dd->sw_send_dma_eng_err_status_cnt[2];
3929}
3930
3931static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3932 void *context, int vl, int mode,
3933 u64 data)
3934{
3935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3936
3937 return dd->sw_send_dma_eng_err_status_cnt[1];
3938}
3939
3940static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3941 void *context, int vl, int mode,
3942 u64 data)
3943{
3944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3945
3946 return dd->sw_send_dma_eng_err_status_cnt[0];
3947}
3948
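/*
 * Generator for per-CPU software counter accessors.  Each invocation
 * below produces access_sw_cpu_<cntr>(), which hands the z_<cntr>
 * reference value and the per-CPU counter kept in ibport_data.rvp to
 * read_write_cpu() for the requested read/write mode.
 */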
3949#define def_access_sw_cpu(cntr) \
3950static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3951 void *context, int vl, int mode, u64 data) \
3952{ \
3953 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3954	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3955 ppd->ibport_data.rvp.cntr, vl, \
3956			      mode, data); \
3957}
3958
3959def_access_sw_cpu(rc_acks);
3960def_access_sw_cpu(rc_qacks);
3961def_access_sw_cpu(rc_delayed_comp);
3962
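/*
 * Generator for IB-protocol software counter accessors.  These are
 * port-wide totals with no per-VL breakdown, hence the CNTR_INVALID_VL
 * check.  For illustration only, def_access_ibp_counter(loop_pkts)
 * expands to roughly:
 *
 *	static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *					void *context, int vl, int mode,
 *					u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_loop_pkts,
 *				     mode, data);
 *	}
 */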
3963#define def_access_ibp_counter(cntr) \
3964static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3965 void *context, int vl, int mode, u64 data) \
3966{ \
3967 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3968 \
3969 if (vl != CNTR_INVALID_VL) \
3970 return 0; \
3971 \
3972	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3973			     mode, data); \
3974}
3975
3976def_access_ibp_counter(loop_pkts);
3977def_access_ibp_counter(rc_resends);
3978def_access_ibp_counter(rnr_naks);
3979def_access_ibp_counter(other_naks);
3980def_access_ibp_counter(rc_timeouts);
3981def_access_ibp_counter(pkt_drops);
3982def_access_ibp_counter(dmawait);
3983def_access_ibp_counter(rc_seqnak);
3984def_access_ibp_counter(rc_dupreq);
3985def_access_ibp_counter(rdma_seq);
3986def_access_ibp_counter(unaligned);
3987def_access_ibp_counter(seq_naks);
3988
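/*
 * Device-level counter table, indexed by the C_* counter enum values.
 * Hardware-backed entries are built with helpers such as
 * RXE32_DEV_CNTR_ELEM() and DC_PERF_CNTR(), while purely software
 * counters use CNTR_ELEM() with zero CSR fields and one of the
 * access_* routines defined above.
 */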
3989static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3990[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3991[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3992 CNTR_NORMAL),
3993[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3994 CNTR_NORMAL),
3995[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3996 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3997 CNTR_NORMAL),
3998[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3999 CNTR_NORMAL),
4000[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4001 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4002[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4003 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4004[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4005 CNTR_NORMAL),
4006[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4007 CNTR_NORMAL),
4008[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4009 CNTR_NORMAL),
4010[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4011 CNTR_NORMAL),
4012[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4013 CNTR_NORMAL),
4014[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4015 CNTR_NORMAL),
4016[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4017 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4018[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4019 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4020[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4021 CNTR_SYNTH),
4022[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4023[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4024 CNTR_SYNTH),
4025[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4026 CNTR_SYNTH),
4027[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4028 CNTR_SYNTH),
4029[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4030 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4031[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4032 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4033 CNTR_SYNTH),
4034[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4035 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4036[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4037 CNTR_SYNTH),
4038[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4039 CNTR_SYNTH),
4040[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4041 CNTR_SYNTH),
4042[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4043 CNTR_SYNTH),
4044[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4045 CNTR_SYNTH),
4046[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4047 CNTR_SYNTH),
4048[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4049 CNTR_SYNTH),
4050[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4051 CNTR_SYNTH | CNTR_VL),
4052[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4053 CNTR_SYNTH | CNTR_VL),
4054[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4055[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4056 CNTR_SYNTH | CNTR_VL),
4057[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4058[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4059 CNTR_SYNTH | CNTR_VL),
4060[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4061 CNTR_SYNTH),
4062[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4063 CNTR_SYNTH | CNTR_VL),
4064[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4065 CNTR_SYNTH),
4066[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4067 CNTR_SYNTH | CNTR_VL),
4068[C_DC_TOTAL_CRC] =
4069 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4070 CNTR_SYNTH),
4071[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4072 CNTR_SYNTH),
4073[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4074 CNTR_SYNTH),
4075[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4076 CNTR_SYNTH),
4077[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4078 CNTR_SYNTH),
4079[C_DC_CRC_MULT_LN] =
4080 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4081 CNTR_SYNTH),
4082[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4083 CNTR_SYNTH),
4084[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4085 CNTR_SYNTH),
4086[C_DC_SEQ_CRC_CNT] =
4087 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4088 CNTR_SYNTH),
4089[C_DC_ESC0_ONLY_CNT] =
4090 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4091 CNTR_SYNTH),
4092[C_DC_ESC0_PLUS1_CNT] =
4093 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4094 CNTR_SYNTH),
4095[C_DC_ESC0_PLUS2_CNT] =
4096 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4097 CNTR_SYNTH),
4098[C_DC_REINIT_FROM_PEER_CNT] =
4099 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4100 CNTR_SYNTH),
4101[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4102 CNTR_SYNTH),
4103[C_DC_MISC_FLG_CNT] =
4104 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4105 CNTR_SYNTH),
4106[C_DC_PRF_GOOD_LTP_CNT] =
4107 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4108[C_DC_PRF_ACCEPTED_LTP_CNT] =
4109 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4110 CNTR_SYNTH),
4111[C_DC_PRF_RX_FLIT_CNT] =
4112 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4113[C_DC_PRF_TX_FLIT_CNT] =
4114 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4115[C_DC_PRF_CLK_CNTR] =
4116 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4117[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4118 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4119[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4120 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4121 CNTR_SYNTH),
4122[C_DC_PG_STS_TX_SBE_CNT] =
4123 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4124[C_DC_PG_STS_TX_MBE_CNT] =
4125 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4126 CNTR_SYNTH),
4127[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4128 access_sw_cpu_intr),
4129[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4130 access_sw_cpu_rcv_limit),
4131[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4132 access_sw_vtx_wait),
4133[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4134 access_sw_pio_wait),
4135[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4136 access_sw_pio_drain),
4137[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4138 access_sw_kmem_wait),
4139[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4140 access_sw_send_schedule),
4141[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4142 SEND_DMA_DESC_FETCHED_CNT, 0,
4143 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4144 dev_access_u32_csr),
4145[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4146 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4147 access_sde_int_cnt),
4148[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4149 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4150 access_sde_err_cnt),
4151[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4152 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4153 access_sde_idle_int_cnt),
4154[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4155 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4156 access_sde_progress_int_cnt),
4157/* MISC_ERR_STATUS */
4158[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4159 CNTR_NORMAL,
4160 access_misc_pll_lock_fail_err_cnt),
4161[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4162 CNTR_NORMAL,
4163 access_misc_mbist_fail_err_cnt),
4164[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4165 CNTR_NORMAL,
4166 access_misc_invalid_eep_cmd_err_cnt),
4167[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4168 CNTR_NORMAL,
4169 access_misc_efuse_done_parity_err_cnt),
4170[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4171 CNTR_NORMAL,
4172 access_misc_efuse_write_err_cnt),
4173[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4174 0, CNTR_NORMAL,
4175 access_misc_efuse_read_bad_addr_err_cnt),
4176[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4177 CNTR_NORMAL,
4178 access_misc_efuse_csr_parity_err_cnt),
4179[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4180 CNTR_NORMAL,
4181 access_misc_fw_auth_failed_err_cnt),
4182[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4183 CNTR_NORMAL,
4184 access_misc_key_mismatch_err_cnt),
4185[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4186 CNTR_NORMAL,
4187 access_misc_sbus_write_failed_err_cnt),
4188[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4189 CNTR_NORMAL,
4190 access_misc_csr_write_bad_addr_err_cnt),
4191[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4192 CNTR_NORMAL,
4193 access_misc_csr_read_bad_addr_err_cnt),
4194[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4195 CNTR_NORMAL,
4196 access_misc_csr_parity_err_cnt),
4197/* CceErrStatus */
4198[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4199 CNTR_NORMAL,
4200 access_sw_cce_err_status_aggregated_cnt),
4201[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4202 CNTR_NORMAL,
4203 access_cce_msix_csr_parity_err_cnt),
4204[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4205 CNTR_NORMAL,
4206 access_cce_int_map_unc_err_cnt),
4207[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4208 CNTR_NORMAL,
4209 access_cce_int_map_cor_err_cnt),
4210[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4211 CNTR_NORMAL,
4212 access_cce_msix_table_unc_err_cnt),
4213[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4214 CNTR_NORMAL,
4215 access_cce_msix_table_cor_err_cnt),
4216[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4217 0, CNTR_NORMAL,
4218 access_cce_rxdma_conv_fifo_parity_err_cnt),
4219[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4220 0, CNTR_NORMAL,
4221 access_cce_rcpl_async_fifo_parity_err_cnt),
4222[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4223 CNTR_NORMAL,
4224 access_cce_seg_write_bad_addr_err_cnt),
4225[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4226 CNTR_NORMAL,
4227 access_cce_seg_read_bad_addr_err_cnt),
4228[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4229 CNTR_NORMAL,
4230 access_la_triggered_cnt),
4231[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4232 CNTR_NORMAL,
4233 access_cce_trgt_cpl_timeout_err_cnt),
4234[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4235 CNTR_NORMAL,
4236 access_pcic_receive_parity_err_cnt),
4237[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4238 CNTR_NORMAL,
4239 access_pcic_transmit_back_parity_err_cnt),
4240[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4241 0, CNTR_NORMAL,
4242 access_pcic_transmit_front_parity_err_cnt),
4243[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4244 CNTR_NORMAL,
4245 access_pcic_cpl_dat_q_unc_err_cnt),
4246[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4247 CNTR_NORMAL,
4248 access_pcic_cpl_hd_q_unc_err_cnt),
4249[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4250 CNTR_NORMAL,
4251 access_pcic_post_dat_q_unc_err_cnt),
4252[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4253 CNTR_NORMAL,
4254 access_pcic_post_hd_q_unc_err_cnt),
4255[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4256 CNTR_NORMAL,
4257 access_pcic_retry_sot_mem_unc_err_cnt),
4258[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4259 CNTR_NORMAL,
4260 access_pcic_retry_mem_unc_err),
4261[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4262 CNTR_NORMAL,
4263 access_pcic_n_post_dat_q_parity_err_cnt),
4264[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4265 CNTR_NORMAL,
4266 access_pcic_n_post_h_q_parity_err_cnt),
4267[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4268 CNTR_NORMAL,
4269 access_pcic_cpl_dat_q_cor_err_cnt),
4270[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4271 CNTR_NORMAL,
4272 access_pcic_cpl_hd_q_cor_err_cnt),
4273[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4274 CNTR_NORMAL,
4275 access_pcic_post_dat_q_cor_err_cnt),
4276[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4277 CNTR_NORMAL,
4278 access_pcic_post_hd_q_cor_err_cnt),
4279[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4280 CNTR_NORMAL,
4281 access_pcic_retry_sot_mem_cor_err_cnt),
4282[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4283 CNTR_NORMAL,
4284 access_pcic_retry_mem_cor_err_cnt),
4285[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4286 "CceCli1AsyncFifoDbgParityError", 0, 0,
4287 CNTR_NORMAL,
4288 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4289[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4290 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4291 CNTR_NORMAL,
4292 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4293 ),
4294[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4295 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4296 CNTR_NORMAL,
4297 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4298[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4299 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4300 CNTR_NORMAL,
4301 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4302[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4303 0, CNTR_NORMAL,
4304 access_cce_cli2_async_fifo_parity_err_cnt),
4305[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4306 CNTR_NORMAL,
4307 access_cce_csr_cfg_bus_parity_err_cnt),
4308[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4309 0, CNTR_NORMAL,
4310 access_cce_cli0_async_fifo_parity_err_cnt),
4311[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4312 CNTR_NORMAL,
4313 access_cce_rspd_data_parity_err_cnt),
4314[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4315 CNTR_NORMAL,
4316 access_cce_trgt_access_err_cnt),
4317[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4318 0, CNTR_NORMAL,
4319 access_cce_trgt_async_fifo_parity_err_cnt),
4320[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4321 CNTR_NORMAL,
4322 access_cce_csr_write_bad_addr_err_cnt),
4323[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4324 CNTR_NORMAL,
4325 access_cce_csr_read_bad_addr_err_cnt),
4326[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4327 CNTR_NORMAL,
4328 access_ccs_csr_parity_err_cnt),
4329
4330/* RcvErrStatus */
4331[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4332 CNTR_NORMAL,
4333 access_rx_csr_parity_err_cnt),
4334[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4335 CNTR_NORMAL,
4336 access_rx_csr_write_bad_addr_err_cnt),
4337[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4338 CNTR_NORMAL,
4339 access_rx_csr_read_bad_addr_err_cnt),
4340[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4341 CNTR_NORMAL,
4342 access_rx_dma_csr_unc_err_cnt),
4343[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4344 CNTR_NORMAL,
4345 access_rx_dma_dq_fsm_encoding_err_cnt),
4346[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4347 CNTR_NORMAL,
4348 access_rx_dma_eq_fsm_encoding_err_cnt),
4349[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4350 CNTR_NORMAL,
4351 access_rx_dma_csr_parity_err_cnt),
4352[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4353 CNTR_NORMAL,
4354 access_rx_rbuf_data_cor_err_cnt),
4355[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4356 CNTR_NORMAL,
4357 access_rx_rbuf_data_unc_err_cnt),
4358[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4359 CNTR_NORMAL,
4360 access_rx_dma_data_fifo_rd_cor_err_cnt),
4361[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4362 CNTR_NORMAL,
4363 access_rx_dma_data_fifo_rd_unc_err_cnt),
4364[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4365 CNTR_NORMAL,
4366 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4367[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4368 CNTR_NORMAL,
4369 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4370[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4371 CNTR_NORMAL,
4372 access_rx_rbuf_desc_part2_cor_err_cnt),
4373[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4374 CNTR_NORMAL,
4375 access_rx_rbuf_desc_part2_unc_err_cnt),
4376[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4377 CNTR_NORMAL,
4378 access_rx_rbuf_desc_part1_cor_err_cnt),
4379[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4380 CNTR_NORMAL,
4381 access_rx_rbuf_desc_part1_unc_err_cnt),
4382[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4383 CNTR_NORMAL,
4384 access_rx_hq_intr_fsm_err_cnt),
4385[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4386 CNTR_NORMAL,
4387 access_rx_hq_intr_csr_parity_err_cnt),
4388[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4389 CNTR_NORMAL,
4390 access_rx_lookup_csr_parity_err_cnt),
4391[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4392 CNTR_NORMAL,
4393 access_rx_lookup_rcv_array_cor_err_cnt),
4394[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4395 CNTR_NORMAL,
4396 access_rx_lookup_rcv_array_unc_err_cnt),
4397[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4398 0, CNTR_NORMAL,
4399 access_rx_lookup_des_part2_parity_err_cnt),
4400[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4401 0, CNTR_NORMAL,
4402 access_rx_lookup_des_part1_unc_cor_err_cnt),
4403[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4404 CNTR_NORMAL,
4405 access_rx_lookup_des_part1_unc_err_cnt),
4406[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4407 CNTR_NORMAL,
4408 access_rx_rbuf_next_free_buf_cor_err_cnt),
4409[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4410 CNTR_NORMAL,
4411 access_rx_rbuf_next_free_buf_unc_err_cnt),
4412[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4413 "RxRbufFlInitWrAddrParityErr", 0, 0,
4414 CNTR_NORMAL,
4415 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4416[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4417 0, CNTR_NORMAL,
4418 access_rx_rbuf_fl_initdone_parity_err_cnt),
4419[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4420 0, CNTR_NORMAL,
4421 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4422[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4425[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4426 CNTR_NORMAL,
4427 access_rx_rbuf_empty_err_cnt),
4428[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4429 CNTR_NORMAL,
4430 access_rx_rbuf_full_err_cnt),
4431[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4432 CNTR_NORMAL,
4433 access_rbuf_bad_lookup_err_cnt),
4434[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4435 CNTR_NORMAL,
4436 access_rbuf_ctx_id_parity_err_cnt),
4437[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4438 CNTR_NORMAL,
4439 access_rbuf_csr_qeopdw_parity_err_cnt),
4440[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4441 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4442 CNTR_NORMAL,
4443 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4444[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4445 "RxRbufCsrQTlPtrParityErr", 0, 0,
4446 CNTR_NORMAL,
4447 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4448[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4449 0, CNTR_NORMAL,
4450 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4451[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4452 0, CNTR_NORMAL,
4453 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4454[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4455 0, 0, CNTR_NORMAL,
4456 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4457[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4458 0, CNTR_NORMAL,
4459 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4460[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4461 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4462 CNTR_NORMAL,
4463 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4464[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4465 0, CNTR_NORMAL,
4466 access_rx_rbuf_block_list_read_cor_err_cnt),
4467[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4468 0, CNTR_NORMAL,
4469 access_rx_rbuf_block_list_read_unc_err_cnt),
4470[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4471 CNTR_NORMAL,
4472 access_rx_rbuf_lookup_des_cor_err_cnt),
4473[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_rx_rbuf_lookup_des_unc_err_cnt),
4476[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4477 "RxRbufLookupDesRegUncCorErr", 0, 0,
4478 CNTR_NORMAL,
4479 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4480[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4481 CNTR_NORMAL,
4482 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4483[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4484 CNTR_NORMAL,
4485 access_rx_rbuf_free_list_cor_err_cnt),
4486[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4487 CNTR_NORMAL,
4488 access_rx_rbuf_free_list_unc_err_cnt),
4489[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4490 CNTR_NORMAL,
4491 access_rx_rcv_fsm_encoding_err_cnt),
4492[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4493 CNTR_NORMAL,
4494 access_rx_dma_flag_cor_err_cnt),
4495[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4496 CNTR_NORMAL,
4497 access_rx_dma_flag_unc_err_cnt),
4498[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4499 CNTR_NORMAL,
4500 access_rx_dc_sop_eop_parity_err_cnt),
4501[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4502 CNTR_NORMAL,
4503 access_rx_rcv_csr_parity_err_cnt),
4504[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4505 CNTR_NORMAL,
4506 access_rx_rcv_qp_map_table_cor_err_cnt),
4507[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4508 CNTR_NORMAL,
4509 access_rx_rcv_qp_map_table_unc_err_cnt),
4510[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4511 CNTR_NORMAL,
4512 access_rx_rcv_data_cor_err_cnt),
4513[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4514 CNTR_NORMAL,
4515 access_rx_rcv_data_unc_err_cnt),
4516[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4517 CNTR_NORMAL,
4518 access_rx_rcv_hdr_cor_err_cnt),
4519[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4520 CNTR_NORMAL,
4521 access_rx_rcv_hdr_unc_err_cnt),
4522[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4523 CNTR_NORMAL,
4524 access_rx_dc_intf_parity_err_cnt),
4525[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4526 CNTR_NORMAL,
4527 access_rx_dma_csr_cor_err_cnt),
4528/* SendPioErrStatus */
4529[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4530 CNTR_NORMAL,
4531 access_pio_pec_sop_head_parity_err_cnt),
4532[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4533 CNTR_NORMAL,
4534 access_pio_pcc_sop_head_parity_err_cnt),
4535[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4536 0, 0, CNTR_NORMAL,
4537 access_pio_last_returned_cnt_parity_err_cnt),
4538[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4539 0, CNTR_NORMAL,
4540 access_pio_current_free_cnt_parity_err_cnt),
4541[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4542 CNTR_NORMAL,
4543 access_pio_reserved_31_err_cnt),
4544[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4545 CNTR_NORMAL,
4546 access_pio_reserved_30_err_cnt),
4547[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4548 CNTR_NORMAL,
4549 access_pio_ppmc_sop_len_err_cnt),
4550[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4551 CNTR_NORMAL,
4552 access_pio_ppmc_bqc_mem_parity_err_cnt),
4553[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4554 CNTR_NORMAL,
4555 access_pio_vl_fifo_parity_err_cnt),
4556[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4557 CNTR_NORMAL,
4558 access_pio_vlf_sop_parity_err_cnt),
4559[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4560 CNTR_NORMAL,
4561 access_pio_vlf_v1_len_parity_err_cnt),
4562[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4563 CNTR_NORMAL,
4564 access_pio_block_qw_count_parity_err_cnt),
4565[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4566 CNTR_NORMAL,
4567 access_pio_write_qw_valid_parity_err_cnt),
4568[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4569 CNTR_NORMAL,
4570 access_pio_state_machine_err_cnt),
4571[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4572 CNTR_NORMAL,
4573 access_pio_write_data_parity_err_cnt),
4574[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4575 CNTR_NORMAL,
4576 access_pio_host_addr_mem_cor_err_cnt),
4577[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4578 CNTR_NORMAL,
4579 access_pio_host_addr_mem_unc_err_cnt),
4580[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4581 CNTR_NORMAL,
4582 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4583[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4584 CNTR_NORMAL,
4585 access_pio_init_sm_in_err_cnt),
4586[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4587 CNTR_NORMAL,
4588 access_pio_ppmc_pbl_fifo_err_cnt),
4589[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4590 0, CNTR_NORMAL,
4591 access_pio_credit_ret_fifo_parity_err_cnt),
4592[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4593 CNTR_NORMAL,
4594 access_pio_v1_len_mem_bank1_cor_err_cnt),
4595[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4596 CNTR_NORMAL,
4597 access_pio_v1_len_mem_bank0_cor_err_cnt),
4598[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4599 CNTR_NORMAL,
4600 access_pio_v1_len_mem_bank1_unc_err_cnt),
4601[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4602 CNTR_NORMAL,
4603 access_pio_v1_len_mem_bank0_unc_err_cnt),
4604[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4605 CNTR_NORMAL,
4606 access_pio_sm_pkt_reset_parity_err_cnt),
4607[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4608 CNTR_NORMAL,
4609 access_pio_pkt_evict_fifo_parity_err_cnt),
4610[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4611 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4612 CNTR_NORMAL,
4613 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4614[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4615 CNTR_NORMAL,
4616 access_pio_sbrdctl_crrel_parity_err_cnt),
4617[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4618 CNTR_NORMAL,
4619 access_pio_pec_fifo_parity_err_cnt),
4620[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4621 CNTR_NORMAL,
4622 access_pio_pcc_fifo_parity_err_cnt),
4623[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4624 CNTR_NORMAL,
4625 access_pio_sb_mem_fifo1_err_cnt),
4626[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4627 CNTR_NORMAL,
4628 access_pio_sb_mem_fifo0_err_cnt),
4629[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4630 CNTR_NORMAL,
4631 access_pio_csr_parity_err_cnt),
4632[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4633 CNTR_NORMAL,
4634 access_pio_write_addr_parity_err_cnt),
4635[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4636 CNTR_NORMAL,
4637 access_pio_write_bad_ctxt_err_cnt),
4638/* SendDmaErrStatus */
4639[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4640 0, CNTR_NORMAL,
4641 access_sdma_pcie_req_tracking_cor_err_cnt),
4642[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4643 0, CNTR_NORMAL,
4644 access_sdma_pcie_req_tracking_unc_err_cnt),
4645[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4646 CNTR_NORMAL,
4647 access_sdma_csr_parity_err_cnt),
4648[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4649 CNTR_NORMAL,
4650 access_sdma_rpy_tag_err_cnt),
4651/* SendEgressErrStatus */
4652[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4653 CNTR_NORMAL,
4654 access_tx_read_pio_memory_csr_unc_err_cnt),
4655[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4656 0, CNTR_NORMAL,
4657 access_tx_read_sdma_memory_csr_err_cnt),
4658[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4659 CNTR_NORMAL,
4660 access_tx_egress_fifo_cor_err_cnt),
4661[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4662 CNTR_NORMAL,
4663 access_tx_read_pio_memory_cor_err_cnt),
4664[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4665 CNTR_NORMAL,
4666 access_tx_read_sdma_memory_cor_err_cnt),
4667[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4668 CNTR_NORMAL,
4669 access_tx_sb_hdr_cor_err_cnt),
4670[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4671 CNTR_NORMAL,
4672 access_tx_credit_overrun_err_cnt),
4673[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4674 CNTR_NORMAL,
4675 access_tx_launch_fifo8_cor_err_cnt),
4676[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4677 CNTR_NORMAL,
4678 access_tx_launch_fifo7_cor_err_cnt),
4679[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4680 CNTR_NORMAL,
4681 access_tx_launch_fifo6_cor_err_cnt),
4682[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4683 CNTR_NORMAL,
4684 access_tx_launch_fifo5_cor_err_cnt),
4685[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4686 CNTR_NORMAL,
4687 access_tx_launch_fifo4_cor_err_cnt),
4688[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4689 CNTR_NORMAL,
4690 access_tx_launch_fifo3_cor_err_cnt),
4691[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4692 CNTR_NORMAL,
4693 access_tx_launch_fifo2_cor_err_cnt),
4694[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4695 CNTR_NORMAL,
4696 access_tx_launch_fifo1_cor_err_cnt),
4697[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4698 CNTR_NORMAL,
4699 access_tx_launch_fifo0_cor_err_cnt),
4700[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4701 CNTR_NORMAL,
4702 access_tx_credit_return_vl_err_cnt),
4703[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4704 CNTR_NORMAL,
4705 access_tx_hcrc_insertion_err_cnt),
4706[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4707 CNTR_NORMAL,
4708 access_tx_egress_fifo_unc_err_cnt),
4709[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4710 CNTR_NORMAL,
4711 access_tx_read_pio_memory_unc_err_cnt),
4712[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4713 CNTR_NORMAL,
4714 access_tx_read_sdma_memory_unc_err_cnt),
4715[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4716 CNTR_NORMAL,
4717 access_tx_sb_hdr_unc_err_cnt),
4718[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4719 CNTR_NORMAL,
4720 access_tx_credit_return_partiy_err_cnt),
4721[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4722 0, 0, CNTR_NORMAL,
4723 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4724[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4725 0, 0, CNTR_NORMAL,
4726 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4727[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4728 0, 0, CNTR_NORMAL,
4729 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4730[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4731 0, 0, CNTR_NORMAL,
4732 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4733[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4734 0, 0, CNTR_NORMAL,
4735 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4736[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4737 0, 0, CNTR_NORMAL,
4738 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4739[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4740 0, 0, CNTR_NORMAL,
4741 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4742[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4743 0, 0, CNTR_NORMAL,
4744 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4745[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4746 0, 0, CNTR_NORMAL,
4747 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4748[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4749 0, 0, CNTR_NORMAL,
4750 access_tx_sdma15_disallowed_packet_err_cnt),
4751[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4752 0, 0, CNTR_NORMAL,
4753 access_tx_sdma14_disallowed_packet_err_cnt),
4754[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4755 0, 0, CNTR_NORMAL,
4756 access_tx_sdma13_disallowed_packet_err_cnt),
4757[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4758 0, 0, CNTR_NORMAL,
4759 access_tx_sdma12_disallowed_packet_err_cnt),
4760[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4761 0, 0, CNTR_NORMAL,
4762 access_tx_sdma11_disallowed_packet_err_cnt),
4763[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4764 0, 0, CNTR_NORMAL,
4765 access_tx_sdma10_disallowed_packet_err_cnt),
4766[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4767 0, 0, CNTR_NORMAL,
4768 access_tx_sdma9_disallowed_packet_err_cnt),
4769[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4770 0, 0, CNTR_NORMAL,
4771 access_tx_sdma8_disallowed_packet_err_cnt),
4772[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4773 0, 0, CNTR_NORMAL,
4774 access_tx_sdma7_disallowed_packet_err_cnt),
4775[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4776 0, 0, CNTR_NORMAL,
4777 access_tx_sdma6_disallowed_packet_err_cnt),
4778[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4779 0, 0, CNTR_NORMAL,
4780 access_tx_sdma5_disallowed_packet_err_cnt),
4781[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4782 0, 0, CNTR_NORMAL,
4783 access_tx_sdma4_disallowed_packet_err_cnt),
4784[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4785 0, 0, CNTR_NORMAL,
4786 access_tx_sdma3_disallowed_packet_err_cnt),
4787[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4788 0, 0, CNTR_NORMAL,
4789 access_tx_sdma2_disallowed_packet_err_cnt),
4790[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4791 0, 0, CNTR_NORMAL,
4792 access_tx_sdma1_disallowed_packet_err_cnt),
4793[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4794 0, 0, CNTR_NORMAL,
4795 access_tx_sdma0_disallowed_packet_err_cnt),
4796[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4797 CNTR_NORMAL,
4798 access_tx_config_parity_err_cnt),
4799[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4800 CNTR_NORMAL,
4801 access_tx_sbrd_ctl_csr_parity_err_cnt),
4802[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4803 CNTR_NORMAL,
4804 access_tx_launch_csr_parity_err_cnt),
4805[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4806 CNTR_NORMAL,
4807 access_tx_illegal_vl_err_cnt),
4808[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4809 "TxSbrdCtlStateMachineParityErr", 0, 0,
4810 CNTR_NORMAL,
4811 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4812[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4813 CNTR_NORMAL,
4814 access_egress_reserved_10_err_cnt),
4815[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4816 CNTR_NORMAL,
4817 access_egress_reserved_9_err_cnt),
4818[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4819 0, 0, CNTR_NORMAL,
4820 access_tx_sdma_launch_intf_parity_err_cnt),
4821[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4822 CNTR_NORMAL,
4823 access_tx_pio_launch_intf_parity_err_cnt),
4824[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4825 CNTR_NORMAL,
4826 access_egress_reserved_6_err_cnt),
4827[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4828 CNTR_NORMAL,
4829 access_tx_incorrect_link_state_err_cnt),
4830[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4831 CNTR_NORMAL,
4832 access_tx_linkdown_err_cnt),
4833[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4834 "EgressFifoUnderrunOrParityErr", 0, 0,
4835 CNTR_NORMAL,
4836 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4837[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4838 CNTR_NORMAL,
4839 access_egress_reserved_2_err_cnt),
4840[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4841 CNTR_NORMAL,
4842 access_tx_pkt_integrity_mem_unc_err_cnt),
4843[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4844 CNTR_NORMAL,
4845 access_tx_pkt_integrity_mem_cor_err_cnt),
4846/* SendErrStatus */
4847[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4848 CNTR_NORMAL,
4849 access_send_csr_write_bad_addr_err_cnt),
4850[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4851 CNTR_NORMAL,
4852 access_send_csr_read_bad_addr_err_cnt),
4853[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4854 CNTR_NORMAL,
4855 access_send_csr_parity_cnt),
4856/* SendCtxtErrStatus */
4857[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4858 CNTR_NORMAL,
4859 access_pio_write_out_of_bounds_err_cnt),
4860[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4861 CNTR_NORMAL,
4862 access_pio_write_overflow_err_cnt),
4863[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4864 0, 0, CNTR_NORMAL,
4865 access_pio_write_crosses_boundary_err_cnt),
4866[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4867 CNTR_NORMAL,
4868 access_pio_disallowed_packet_err_cnt),
4869[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4870 CNTR_NORMAL,
4871 access_pio_inconsistent_sop_err_cnt),
4872/* SendDmaEngErrStatus */
4873[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4874 0, 0, CNTR_NORMAL,
4875 access_sdma_header_request_fifo_cor_err_cnt),
4876[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4877 CNTR_NORMAL,
4878 access_sdma_header_storage_cor_err_cnt),
4879[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4880 CNTR_NORMAL,
4881 access_sdma_packet_tracking_cor_err_cnt),
4882[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4883 CNTR_NORMAL,
4884 access_sdma_assembly_cor_err_cnt),
4885[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4886 CNTR_NORMAL,
4887 access_sdma_desc_table_cor_err_cnt),
4888[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4889 0, 0, CNTR_NORMAL,
4890 access_sdma_header_request_fifo_unc_err_cnt),
4891[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4892 CNTR_NORMAL,
4893 access_sdma_header_storage_unc_err_cnt),
4894[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4895 CNTR_NORMAL,
4896 access_sdma_packet_tracking_unc_err_cnt),
4897[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4898 CNTR_NORMAL,
4899 access_sdma_assembly_unc_err_cnt),
4900[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4901 CNTR_NORMAL,
4902 access_sdma_desc_table_unc_err_cnt),
4903[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4904 CNTR_NORMAL,
4905 access_sdma_timeout_err_cnt),
4906[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4907 CNTR_NORMAL,
4908 access_sdma_header_length_err_cnt),
4909[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4910 CNTR_NORMAL,
4911 access_sdma_header_address_err_cnt),
4912[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4913 CNTR_NORMAL,
4914 access_sdma_header_select_err_cnt),
4915[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4916 CNTR_NORMAL,
4917 access_sdma_reserved_9_err_cnt),
4918[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4919 CNTR_NORMAL,
4920 access_sdma_packet_desc_overflow_err_cnt),
4921[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4922 CNTR_NORMAL,
4923 access_sdma_length_mismatch_err_cnt),
4924[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4925 CNTR_NORMAL,
4926 access_sdma_halt_err_cnt),
4927[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4928 CNTR_NORMAL,
4929 access_sdma_mem_read_err_cnt),
4930[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4931 CNTR_NORMAL,
4932 access_sdma_first_desc_err_cnt),
4933[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4934 CNTR_NORMAL,
4935 access_sdma_tail_out_of_bounds_err_cnt),
4936[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4937 CNTR_NORMAL,
4938 access_sdma_too_long_err_cnt),
4939[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4940 CNTR_NORMAL,
4941 access_sdma_gen_mismatch_err_cnt),
4942[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4943 CNTR_NORMAL,
4944 access_sdma_wrong_dw_err_cnt),
};
4946
4947static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4948[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4949 CNTR_NORMAL),
4950[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4951 CNTR_NORMAL),
4952[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4953 CNTR_NORMAL),
4954[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4955 CNTR_NORMAL),
4956[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4957 CNTR_NORMAL),
4958[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4959 CNTR_NORMAL),
4960[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4961 CNTR_NORMAL),
4962[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4963[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4964[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5007[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5008[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5009[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5010[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5011[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5012[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5013[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5014[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5015[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5016[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5017[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5018[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5019[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5020[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5021[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5022[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5023[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5024[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5025[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5026[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5027[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5028[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5029[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5030[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5031[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5032[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5033[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5034[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5035[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5036[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5037[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5038[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5039[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5040[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5041[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5042[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5043[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5044[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5045[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5046[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5047[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5048[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5049[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5050[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5051[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5052[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5053[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5054[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5055[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5056[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5057[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5058[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5059[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5060[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5061[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5062[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5063[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5064[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5065[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5066[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5067[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5068[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5069[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5070[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5071[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5072[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5073[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5074[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5075[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5076[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5077[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5078[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5079[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5080[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5081[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5082[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5083[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5084[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5085[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5086};
5087
5088/* ======================================================================== */
5089
/* return true if this is chip revision A */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision B */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xF0) == 0x10;
}
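
/*
 * Informational note on the decode above: with this mask and shift, a
 * CCE_REVISION minor value of 0x00-0x0f identifies an A-step part
 * (is_ax() returns true) and 0x10-0x1f a B-step part (is_bx() returns
 * true).
 */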
5107
5108/*
5109 * Append string s to buffer buf. Arguments curp and len are the current
5110 * position and remaining length, respectively.
5111 *
5112 * return 0 on success, 1 on out of room
5113 */
5114static int append_str(char *buf, char **curp, int *lenp, const char *s)
5115{
5116 char *p = *curp;
5117 int len = *lenp;
5118 int result = 0; /* success */
5119 char c;
5120
	/* add a comma if this is not the first string in the buffer */
5122 if (p != buf) {
5123 if (len == 0) {
5124 result = 1; /* out of room */
5125 goto done;
5126 }
5127 *p++ = ',';
5128 len--;
5129 }
5130
5131 /* copy the string */
5132 while ((c = *s++) != 0) {
5133 if (len == 0) {
5134 result = 1; /* out of room */
5135 goto done;
5136 }
5137 *p++ = c;
5138 len--;
5139 }
5140
5141done:
5142 /* write return values */
5143 *curp = p;
5144 *lenp = len;
5145
5146 return result;
5147}
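
/*
 * Illustrative sketch (not compiled): building "DevErr,LinkErr" with
 * append_str(). The string values here are hypothetical.
 *
 *	char buf[64], *cur = buf;
 *	int room = sizeof(buf);
 *
 *	append_str(buf, &cur, &room, "DevErr");
 *	append_str(buf, &cur, &room, "LinkErr");
 *	*cur = 0;	// caller terminates, as flag_string() does below
 *
 * Each call advances *curp and shrinks *lenp; a nonzero return means the
 * buffer ran out of room mid-copy.
 */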
5148
5149/*
5150 * Using the given flag table, print a comma separated string into
5151 * the buffer. End in '*' if the buffer is too short.
5152 */
5153static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
5156 char extra[32];
5157 char *p = buf;
5158 int len = buf_len;
5159 int no_room = 0;
5160 int i;
5161
	/* make sure there are at least 2 bytes so we can form "*" */
5163 if (len < 2)
5164 return "";
5165
5166 len--; /* leave room for a nul */
5167 for (i = 0; i < table_size; i++) {
5168 if (flags & table[i].flag) {
5169 no_room = append_str(buf, &p, &len, table[i].str);
5170 if (no_room)
5171 break;
5172 flags &= ~table[i].flag;
5173 }
5174 }
5175
5176 /* any undocumented bits left? */
5177 if (!no_room && flags) {
5178 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5179 no_room = append_str(buf, &p, &len, extra);
5180 }
5181
5182 /* add * if ran out of room */
5183 if (no_room) {
5184 /* may need to back up to add space for a '*' */
5185 if (len == 0)
5186 --p;
5187 *p++ = '*';
5188 }
5189
5190 /* add final nul - space already allocated above */
5191 *p = 0;
5192 return buf;
5193}
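
/*
 * Illustrative sketch (not compiled): decoding a register with
 * flag_string(). The table below is hypothetical, but it uses the same
 * .flag/.str fields the loop above walks.
 *
 *	static struct flag_table example_flags[] = {
 *		{ .flag = 0x1ull, .str = "BitZeroErr" },
 *		{ .flag = 0x2ull, .str = "BitOneErr" }
 *	};
 *	char buf[96];
 *
 *	dd_dev_info(dd, "Example: %s\n",
 *		    flag_string(buf, sizeof(buf), reg, example_flags,
 *				ARRAY_SIZE(example_flags)));
 *
 * A reg value of 0x5 would print "BitZeroErr,bits 0x4" since bit 2 is
 * not in the table; a buffer that is too small ends in '*'.
 */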
5194
5195/* first 8 CCE error interrupt source names */
5196static const char * const cce_misc_names[] = {
5197 "CceErrInt", /* 0 */
5198 "RxeErrInt", /* 1 */
5199 "MiscErrInt", /* 2 */
5200 "Reserved3", /* 3 */
5201 "PioErrInt", /* 4 */
5202 "SDmaErrInt", /* 5 */
5203 "EgressErrInt", /* 6 */
5204 "TxeErrInt" /* 7 */
5205};
5206
5207/*
5208 * Return the miscellaneous error interrupt name.
5209 */
5210static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5211{
5212 if (source < ARRAY_SIZE(cce_misc_names))
5213 strncpy(buf, cce_misc_names[source], bsize);
5214 else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);

5218 return buf;
5219}
5220
5221/*
5222 * Return the SDMA engine error interrupt name.
5223 */
5224static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5225{
5226 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5227 return buf;
5228}
5229
5230/*
5231 * Return the send context error interrupt name.
5232 */
5233static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5234{
5235 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5236 return buf;
5237}
5238
5239static const char * const various_names[] = {
5240 "PbcInt",
5241 "GpioAssertInt",
5242 "Qsfp1Int",
5243 "Qsfp2Int",
5244 "TCritInt"
5245};
5246
5247/*
5248 * Return the various interrupt name.
5249 */
5250static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5251{
5252 if (source < ARRAY_SIZE(various_names))
5253 strncpy(buf, various_names[source], bsize);
5254 else
		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
	return buf;
5257}
5258
5259/*
5260 * Return the DC interrupt name.
5261 */
5262static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5263{
5264 static const char * const dc_int_names[] = {
5265 "common",
5266 "lcb",
5267 "8051",
5268 "lbm" /* local block merge */
5269 };
5270
5271 if (source < ARRAY_SIZE(dc_int_names))
5272 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5273 else
5274 snprintf(buf, bsize, "DCInt%u", source);
5275 return buf;
5276}
5277
5278static const char * const sdma_int_names[] = {
5279 "SDmaInt",
5280 "SdmaIdleInt",
5281 "SdmaProgressInt",
5282};
5283
5284/*
5285 * Return the SDMA engine interrupt name.
5286 */
5287static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5288{
5289 /* what interrupt */
5290 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5291 /* which engine */
5292 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5293
5294 if (likely(what < 3))
5295 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5296 else
5297 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5298 return buf;
5299}
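
/*
 * Worked example for the decode above (assuming TXE_NUM_SDMA_ENGINES is
 * 16): source 35 gives what = 35 / 16 = 2 and which = 35 % 16 = 3, so
 * the name produced is "SdmaProgressInt3".
 */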
5300
5301/*
5302 * Return the receive available interrupt name.
5303 */
5304static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5305{
5306 snprintf(buf, bsize, "RcvAvailInt%u", source);
5307 return buf;
5308}
5309
5310/*
5311 * Return the receive urgent interrupt name.
5312 */
5313static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5314{
5315 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5316 return buf;
5317}
5318
5319/*
5320 * Return the send credit interrupt name.
5321 */
5322static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5323{
5324 snprintf(buf, bsize, "SendCreditInt%u", source);
5325 return buf;
5326}
5327
5328/*
5329 * Return the reserved interrupt name.
5330 */
5331static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5332{
5333 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5334 return buf;
5335}
5336
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   cce_err_status_flags,
			   ARRAY_SIZE(cce_err_status_flags));
}

static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   rxe_err_status_flags,
			   ARRAY_SIZE(rxe_err_status_flags));
}

static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, misc_err_status_flags,
			   ARRAY_SIZE(misc_err_status_flags));
}

static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   pio_err_status_flags,
			   ARRAY_SIZE(pio_err_status_flags));
}

static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sdma_err_status_flags,
			   ARRAY_SIZE(sdma_err_status_flags));
}

static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_status_flags,
			   ARRAY_SIZE(egress_err_status_flags));
}

static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_info_flags,
			   ARRAY_SIZE(egress_err_info_flags));
}

static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   send_err_status_flags,
			   ARRAY_SIZE(send_err_status_flags));
}
5391
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	/*
	 * For most of these errors, there is nothing that can be done except
	 * report or record it.
	 */
	dd_dev_info(dd, "CCE Error: %s\n",
		    cce_err_status_string(buf, sizeof(buf), reg));

	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
		/* this error requires a manual drop into SPC freeze mode */
		/* then a fix up */
		start_freeze_handling(dd->pport, FREEZE_SELF);
	}

	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i)) {
			incr_cntr64(&dd->cce_err_status_cnt[i]);
			/* maintain a counter over all cce_err_status errors */
			incr_cntr64(&dd->sw_cce_err_status_aggregate);
		}
	}
}
5419
5420/*
5421 * Check counters for receive errors that do not have an interrupt
5422 * associated with them.
5423 */
5424#define RCVERR_CHECK_TIME 10
5425static void update_rcverr_timer(unsigned long opaque)
5426{
5427 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5428 struct hfi1_pportdata *ppd = dd->pport;
5429 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5430
	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(
			ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
	}
	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;

	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static int init_rcverr(struct hfi1_devdata *dd)
{
	setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
	/* Assume the hardware counter has been reset */
5448 dd->rcv_ovfl_cnt = 0;
5449 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5450}
5451
5452static void free_rcverr(struct hfi1_devdata *dd)
5453{
5454 if (dd->rcverr_timer.data)
5455 del_timer_sync(&dd->rcverr_timer);
5456 dd->rcverr_timer.data = 0;
5457}
5458
5459static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5460{
5461 char buf[96];
	int i = 0;

	dd_dev_info(dd, "Receive Error: %s\n",
		    rxe_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_RXE_FREEZE_ERR) {
		int flags = 0;

		/*
		 * Freeze mode recovery is disabled for the errors
		 * in RXE_FREEZE_ABORT_MASK
		 */
		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
			flags = FREEZE_ABORT;

		start_freeze_handling(dd->pport, flags);
	}

	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->rcv_err_status_cnt[i]);
	}
}
5485
5486static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5487{
5488 char buf[96];
	int i = 0;

	dd_dev_info(dd, "Misc Error: %s",
		    misc_err_status_string(buf, sizeof(buf), reg));
	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->misc_err_status_cnt[i]);
	}
}
5498
5499static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5500{
5501 char buf[96];
	int i = 0;

	dd_dev_info(dd, "PIO Error: %s\n",
		    pio_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_PIO_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
	}
}
5515
5516static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5517{
5518 char buf[96];
	int i = 0;

	dd_dev_info(dd, "SDMA Error: %s\n",
		    sdma_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_SDMA_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
	}
}
5532
static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
	incr_cntr64(&ppd->port_xmit_discards);
}

static void count_port_inactive(struct hfi1_devdata *dd)
{
	__count_port_discards(dd->pport);
}
5542
5543/*
5544 * We have had a "disallowed packet" error during egress. Determine the
5545 * integrity check which failed, and update relevant error counter, etc.
5546 *
5547 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5548 * bit of state per integrity check, and so we can miss the reason for an
5549 * egress error if more than one packet fails the same integrity check
5550 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5551 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
					int vl)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
	char buf[96];

	/* clear down all observed info as quickly as possible after read */
	write_csr(dd, SEND_EGRESS_ERR_INFO, info);

	dd_dev_info(dd,
		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
		    info, egress_err_info_string(buf, sizeof(buf), info), src);

	/* Eventually add other counters for each bit */
	if (info & PORT_DISCARD_EGRESS_ERRS) {
		int weight, i;

		/*
		 * Count all, in case multiple bits are set. Reminder:
		 * since there is only one info register for many sources,
		 * these may be attributed to the wrong VL if they occur
		 * too close together.
		 */
		weight = hweight64(info);
		for (i = 0; i < weight; i++) {
			__count_port_discards(ppd);
			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl
					    [C_VL_15]);
		}
	}
}
5588
5589/*
5590 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5591 * register. Does it represent a 'port inactive' error?
5592 */
5593static inline int port_inactive_err(u64 posn)
5594{
5595 return (posn >= SEES(TX_LINKDOWN) &&
5596 posn <= SEES(TX_INCORRECT_LINK_STATE));
5597}
5598
5599/*
5600 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5601 * register. Does it represent a 'disallowed packet' error?
5602 */
static inline int disallowed_pkt_err(int posn)
{
5605 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5606 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5607}
5608
/*
5610 * Input value is a bit position of one of the SDMA engine disallowed
5611 * packet errors. Return which engine. Use of this must be guarded by
5612 * disallowed_pkt_err().
5613 */
5614static inline int disallowed_pkt_engine(int posn)
5615{
5616 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5617}
5618
5619/*
 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5621 * be done.
5622 */
5623static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5624{
5625 struct sdma_vl_map *m;
5626 int vl;
5627
5628 /* range check */
5629 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5630 return -1;
5631
5632 rcu_read_lock();
5633 m = rcu_dereference(dd->sdma_map);
5634 vl = m->engine_to_vl[engine];
5635 rcu_read_unlock();
5636
5637 return vl;
5638}
5639
5640/*
 * Translate the send context (software index) into a VL. Return -1 if the
5642 * translation cannot be done.
5643 */
5644static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5645{
5646 struct send_context_info *sci;
5647 struct send_context *sc;
5648 int i;
5649
5650 sci = &dd->send_contexts[sw_index];
5651
5652 /* there is no information for user (PSM) and ack contexts */
5653 if (sci->type != SC_KERNEL)
5654 return -1;
5655
5656 sc = sci->sc;
5657 if (!sc)
5658 return -1;
5659 if (dd->vld[15].sc == sc)
5660 return 15;
5661 for (i = 0; i < num_vls; i++)
5662 if (dd->vld[i].sc == sc)
5663 return i;
5664
5665 return -1;
5666}
5667
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 reg_copy = reg, handled = 0;
	char buf[96];
	int i = 0;

	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);
	else if (is_ax(dd) &&
		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
		start_freeze_handling(dd->pport, 0);

	while (reg_copy) {
		int posn = fls64(reg_copy);
		/* fls64() returns a 1-based offset, we want it zero based */
		int shift = posn - 1;
		u64 mask = 1ULL << shift;

		if (port_inactive_err(shift)) {
			count_port_inactive(dd);
			handled |= mask;
		} else if (disallowed_pkt_err(shift)) {
			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));

			handle_send_egress_err_info(dd, vl);
			handled |= mask;
		}
		reg_copy &= ~mask;
	}

	reg &= ~handled;

	if (reg)
		dd_dev_info(dd, "Egress Error: %s\n",
			    egress_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
	}
}
5710
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Send Error: %s\n",
		    send_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_err_status_cnt[i]);
	}
}
5724
5725/*
5726 * The maximum number of times the error clear down will loop before
5727 * blocking a repeating error. This value is arbitrary.
5728 */
5729#define MAX_CLEAR_COUNT 20
5730
5731/*
5732 * Clear and handle an error register. All error interrupts are funneled
5733 * through here to have a central location to correctly handle single-
5734 * or multi-shot errors.
5735 *
5736 * For non per-context registers, call this routine with a context value
5737 * of 0 so the per-context offset is zero.
5738 *
5739 * If the handler loops too many times, assume that something is wrong
5740 * and can't be fixed, so mask the error bits.
5741 */
5742static void interrupt_clear_down(struct hfi1_devdata *dd,
5743 u32 context,
5744 const struct err_reg_info *eri)
5745{
5746 u64 reg;
5747 u32 count;
5748
5749 /* read in a loop until no more errors are seen */
5750 count = 0;
5751 while (1) {
5752 reg = read_kctxt_csr(dd, context, eri->status);
5753 if (reg == 0)
5754 break;
5755 write_kctxt_csr(dd, context, eri->clear, reg);
5756 if (likely(eri->handler))
5757 eri->handler(dd, context, reg);
5758 count++;
5759 if (count > MAX_CLEAR_COUNT) {
5760 u64 mask;
5761
5762 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
				   eri->desc, reg);
			/*
5765 * Read-modify-write so any other masked bits
5766 * remain masked.
5767 */
5768 mask = read_kctxt_csr(dd, context, eri->mask);
5769 mask &= ~reg;
5770 write_kctxt_csr(dd, context, eri->mask, mask);
5771 break;
5772 }
5773 }
5774}
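
/*
 * Illustrative sketch (not compiled) of the err_reg_info plumbing that
 * feeds interrupt_clear_down(). The CSR names and handler below are
 * hypothetical; only the field names (.status, .clear, .mask, .handler,
 * .desc) are taken from the uses above.
 *
 *	static const struct err_reg_info example_eri = {
 *		.status  = EXAMPLE_ERR_STATUS,
 *		.clear   = EXAMPLE_ERR_CLEAR,
 *		.mask    = EXAMPLE_ERR_MASK,
 *		.handler = handle_example_err,
 *		.desc    = "ExampleErr"
 *	};
 *
 *	interrupt_clear_down(dd, 0, &example_eri);
 *
 * The loop reads .status, writes the value back to .clear, calls the
 * handler, and after MAX_CLEAR_COUNT iterations masks the stuck bits.
 */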
5775
5776/*
5777 * CCE block "misc" interrupt. Source is < 16.
5778 */
5779static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5780{
5781 const struct err_reg_info *eri = &misc_errs[source];
5782
5783 if (eri->handler) {
5784 interrupt_clear_down(dd, 0, eri);
5785 } else {
5786 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
			   source);
	}
5789}
5790
5791static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5792{
5793 return flag_string(buf, buf_len, flags,
			   sc_err_status_flags,
			   ARRAY_SIZE(sc_err_status_flags));
}
5797
5798/*
5799 * Send context error interrupt. Source (hw_context) is < 160.
5800 *
5801 * All send context errors cause the send context to halt. The normal
5802 * clear-down mechanism cannot be used because we cannot clear the
5803 * error bits until several other long-running items are done first.
5804 * This is OK because with the context halted, nothing else is going
5805 * to happen on it anyway.
5806 */
5807static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5808 unsigned int hw_context)
5809{
5810 struct send_context_info *sci;
5811 struct send_context *sc;
5812 char flags[96];
5813 u64 status;
5814 u32 sw_index;
	int i = 0;

5817 sw_index = dd->hw_to_sw[hw_context];
5818 if (sw_index >= dd->num_send_contexts) {
5819 dd_dev_err(dd,
			   "out of range sw index %u for send context %u\n",
			   sw_index, hw_context);
		return;
5823 }
5824 sci = &dd->send_contexts[sw_index];
5825 sc = sci->sc;
5826 if (!sc) {
5827 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
			   sw_index, hw_context);
		return;
5830 }
5831
5832 /* tell the software that a halt has begun */
5833 sc_stop(sc, SCF_HALTED);
5834
5835 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5836
5837 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
		    send_context_err_status_string(flags, sizeof(flags),
						   status));

	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));

5844 /*
5845 * Automatically restart halted kernel contexts out of interrupt
5846 * context. User contexts must ask the driver to restart the context.
5847 */
5848 if (sc->type != SC_USER)
5849 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005850
5851 /*
5852 * Update the counters for the corresponding status bits.
5853 * Note that these particular counters are aggregated over all
5854 * 160 contexts.
5855 */
5856 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5857 if (status & (1ull << i))
5858 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5859 }
}
5861
5862static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5863 unsigned int source, u64 status)
5864{
5865 struct sdma_engine *sde;
	int i = 0;

5868 sde = &dd->per_sdma[source];
5869#ifdef CONFIG_SDMA_VERBOSITY
5870 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5871 slashstrip(__FILE__), __LINE__, __func__);
5872 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5873 sde->this_idx, source, (unsigned long long)status);
5874#endif
	sde->err_cnt++;
	sdma_engine_error(sde, status);

5878 /*
5879 * Update the counters for the corresponding status bits.
5880 * Note that these particular counters are aggregated over
5881 * all 16 DMA engines.
5882 */
5883 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5884 if (status & (1ull << i))
5885 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5886 }
}
5888
5889/*
5890 * CCE block SDMA error interrupt. Source is < 16.
5891 */
5892static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5893{
5894#ifdef CONFIG_SDMA_VERBOSITY
5895 struct sdma_engine *sde = &dd->per_sdma[source];
5896
5897 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5898 slashstrip(__FILE__), __LINE__, __func__);
5899 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5900 source);
5901 sdma_dumpstate(sde);
5902#endif
5903 interrupt_clear_down(dd, source, &sdma_eng_err);
5904}
5905
5906/*
5907 * CCE block "various" interrupt. Source is < 8.
5908 */
5909static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5910{
5911 const struct err_reg_info *eri = &various_err[source];
5912
5913 /*
5914 * TCritInt cannot go through interrupt_clear_down()
5915 * because it is not a second tier interrupt. The handler
5916 * should be called directly.
5917 */
5918 if (source == TCRIT_INT_SOURCE)
5919 handle_temp_err(dd);
5920 else if (eri->handler)
5921 interrupt_clear_down(dd, 0, eri);
5922 else
5923 dd_dev_info(dd,
			    "%s: Unimplemented/reserved interrupt %d\n",
			    __func__, source);
}
5927
5928static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5929{
	/* src_ctx is always zero */
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);

	if (reg & QSFP_HFI0_MODPRST_N) {
		dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
			    __func__);

		if (!qsfp_mod_present(ppd)) {
5940 ppd->driver_link_ready = 0;
5941 /*
5942 * Cable removed, reset all our information about the
5943 * cache and cable capabilities
5944 */
5945
5946 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5947 /*
5948 * We don't set cache_refresh_required here as we expect
5949 * an interrupt when a cable is inserted
5950 */
5951 ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.reset_needed = 0;
			ppd->qsfp_info.limiting_active = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
			/* Invert the ModPresent pin now to detect plug-in */
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			if ((ppd->offline_disabled_reason >
			  HFI1_ODR_MASK(
			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
			  (ppd->offline_disabled_reason ==
			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
				ppd->offline_disabled_reason =
				HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);

Mike Marciniszyn77241052015-07-30 15:17:43 -04005969 if (ppd->host_link_state == HLS_DN_POLL) {
5970 /*
5971 * The link is still in POLL. This means
5972 * that the normal link down processing
5973 * will not happen. We have to do it here
5974 * before turning the DC off.
5975 */
5976 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5977 }
5978 } else {
5979 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5980 ppd->qsfp_info.cache_valid = 0;
5981 ppd->qsfp_info.cache_refresh_required = 1;
5982 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);

			/*
			 * Stop inversion of ModPresent pin to detect
			 * removal of the cable
			 */
			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			ppd->offline_disabled_reason =
			    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
		}
5996 }
5997
5998 if (reg & QSFP_HFI0_INT_N) {
		dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
			    __func__);
		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
		ppd->qsfp_info.check_interrupt_flags = 1;
		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6004 }
6005
6006 /* Schedule the QSFP work only if there is a cable attached. */
6007 if (qsfp_mod_present(ppd))
6008 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6009}
6010
6011static int request_host_lcb_access(struct hfi1_devdata *dd)
6012{
6013 int ret;
6014
6015 ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
6022 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6023}
6024
6025static int request_8051_lcb_access(struct hfi1_devdata *dd)
6026{
6027 int ret;
6028
6029 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006030 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6031 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006032 if (ret != HCMD_SUCCESS) {
6033 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006034 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006035 }
6036 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6037}
6038
6039/*
6040 * Set the LCB selector - allow host access. The DCC selector always
6041 * points to the host.
6042 */
6043static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6044{
6045 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006046 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6047 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006048}
6049
6050/*
6051 * Clear the LCB selector - allow 8051 access. The DCC selector always
6052 * points to the host.
6053 */
6054static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6055{
6056 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006057 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006058}
6059
6060/*
6061 * Acquire LCB access from the 8051. If the host already has access,
6062 * just increment a counter. Otherwise, inform the 8051 that the
6063 * host is taking access.
6064 *
6065 * Returns:
6066 * 0 on success
6067 * -EBUSY if the 8051 has control and cannot be disturbed
6068 * -errno if unable to acquire access from the 8051
6069 */
6070int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6071{
6072 struct hfi1_pportdata *ppd = dd->pport;
6073 int ret = 0;
6074
6075 /*
6076 * Use the host link state lock so the operation of this routine
6077 * { link state check, selector change, count increment } can occur
6078 * as a unit against a link state change. Otherwise there is a
6079 * race between the state change and the count increment.
6080 */
6081 if (sleep_ok) {
6082 mutex_lock(&ppd->hls_lock);
6083 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006084 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006085 udelay(1);
6086 }
6087
6088 /* this access is valid only when the link is up */
6089 if ((ppd->host_link_state & HLS_UP) == 0) {
6090 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006091 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006092 ret = -EBUSY;
6093 goto done;
6094 }
6095
6096 if (dd->lcb_access_count == 0) {
6097 ret = request_host_lcb_access(dd);
6098 if (ret) {
6099 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006100 "%s: unable to acquire LCB access, err %d\n",
6101 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006102 goto done;
6103 }
6104 set_host_lcb_access(dd);
6105 }
6106 dd->lcb_access_count++;
6107done:
6108 mutex_unlock(&ppd->hls_lock);
6109 return ret;
6110}
6111
6112/*
6113 * Release LCB access by decrementing the use count. If the count is moving
6114 * from 1 to 0, inform 8051 that it has control back.
6115 *
6116 * Returns:
6117 * 0 on success
6118 * -errno if unable to release access to the 8051
6119 */
6120int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6121{
6122 int ret = 0;
6123
6124 /*
6125 * Use the host link state lock because the acquire needed it.
6126 * Here, we only need to keep { selector change, count decrement }
6127 * as a unit.
6128 */
6129 if (sleep_ok) {
6130 mutex_lock(&dd->pport->hls_lock);
6131 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006132 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006133 udelay(1);
6134 }
6135
6136 if (dd->lcb_access_count == 0) {
6137 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006138 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006139 goto done;
6140 }
6141
6142 if (dd->lcb_access_count == 1) {
6143 set_8051_lcb_access(dd);
6144 ret = request_8051_lcb_access(dd);
6145 if (ret) {
6146 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006147 "%s: unable to release LCB access, err %d\n",
6148 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006149 /* restore host access if the grant didn't work */
6150 set_host_lcb_access(dd);
6151 goto done;
6152 }
6153 }
6154 dd->lcb_access_count--;
6155done:
6156 mutex_unlock(&dd->pport->hls_lock);
6157 return ret;
6158}
6159
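/*
 * Illustrative sketch only (not a driver entry point): a hypothetical
 * caller that wants to read an LCB CSR while the link is up would pair
 * the two routines above, e.g.
 *
 *	u64 reg;
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Nesting is allowed because only the 0 -> 1 and 1 -> 0 transitions of
 * lcb_access_count actually talk to the 8051.
 */
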
6160/*
6161 * Initialize LCB access variables and state. Called during driver load,
6162 * after most of the initialization is finished.
6163 *
6164 * The DC default is LCB access on for the host. The driver defaults to
6165 * leaving access to the 8051. Assign access now - this constrains the call
6166 * to this routine to be after all LCB set-up is done. In particular, after
 6167 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6168 */
6169static void init_lcb_access(struct hfi1_devdata *dd)
6170{
6171 dd->lcb_access_count = 0;
6172}
6173
6174/*
6175 * Write a response back to a 8051 request.
6176 */
6177static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6178{
6179 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006180 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6181 (u64)return_code <<
6182 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6183 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006184}
6185
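/*
 * Illustrative sketch: a minimal, successful no-data reply (as used for
 * HREQ_CONFIG_DONE further below) is simply
 *
 *	hreq_response(dd, HREQ_SUCCESS, 0);
 *
 * which sets COMPLETED along with the return code so the 8051 sees the
 * request as serviced.
 */
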
6186/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006187 * Handle host requests from the 8051.
6188 *
6189 * This is a work-queue function outside of the interrupt.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006190 */
Easwar Hariharancbac3862016-02-03 14:31:31 -08006191void handle_8051_request(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006192{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006193 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6194 dc_host_req_work);
6195 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006196 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006197 u16 data = 0;
6198 u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
6199 u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
Mike Marciniszyn77241052015-07-30 15:17:43 -04006200
6201 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6202 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6203 return; /* no request */
6204
6205 /* zero out COMPLETED so the response is seen */
6206 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6207
6208 /* extract request details */
6209 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6210 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6211 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6212 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6213
6214 switch (type) {
6215 case HREQ_LOAD_CONFIG:
6216 case HREQ_SAVE_CONFIG:
6217 case HREQ_READ_CONFIG:
6218 case HREQ_SET_TX_EQ_ABS:
6219 case HREQ_SET_TX_EQ_REL:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006220 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006221 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006222 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6223 break;
6224
Easwar Hariharancbac3862016-02-03 14:31:31 -08006225 case HREQ_ENABLE:
6226 lanes = data & 0xF;
6227 for (i = 0; lanes; lanes >>= 1, i++) {
6228 if (!(lanes & 1))
6229 continue;
6230 if (data & 0x200) {
6231 /* enable TX CDR */
6232 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6233 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6234 cdr_ctrl_byte |= (1 << (i + 4));
6235 } else {
6236 /* disable TX CDR */
6237 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6238 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6239 cdr_ctrl_byte &= ~(1 << (i + 4));
6240 }
6241
6242 if (data & 0x800) {
6243 /* enable RX CDR */
6244 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6245 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6246 cdr_ctrl_byte |= (1 << i);
6247 } else {
6248 /* disable RX CDR */
6249 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6250 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6251 cdr_ctrl_byte &= ~(1 << i);
6252 }
6253 }
6254 qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
6255 &cdr_ctrl_byte, 1);
6256 hreq_response(dd, HREQ_SUCCESS, data);
6257 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
6258 break;
6259
Mike Marciniszyn77241052015-07-30 15:17:43 -04006260 case HREQ_CONFIG_DONE:
6261 hreq_response(dd, HREQ_SUCCESS, 0);
6262 break;
6263
6264 case HREQ_INTERFACE_TEST:
6265 hreq_response(dd, HREQ_SUCCESS, data);
6266 break;
6267
6268 default:
6269 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6270 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6271 break;
6272 }
6273}
6274
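/*
 * Worked example (hypothetical values) for the HREQ_ENABLE handling
 * above, assuming the cached QSFP data advertises CDR support in both
 * directions: data = 0xa05 selects lanes 0 and 2 (low nibble 0x5) and
 * requests TX CDR on (0x200) and RX CDR on (0x800), so cdr_ctrl_byte
 * gains bits 4 and 6 (TX, lanes 0/2) and bits 0 and 2 (RX, lanes 0/2)
 * before being written back to the module.
 */
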
6275static void write_global_credit(struct hfi1_devdata *dd,
6276 u8 vau, u16 total, u16 shared)
6277{
6278 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
Jubin John17fb4f22016-02-14 20:21:52 -08006279 ((u64)total <<
6280 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6281 ((u64)shared <<
6282 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6283 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006284}
6285
6286/*
6287 * Set up initial VL15 credits of the remote. Assumes the rest of
6288 * the CM credit registers are zero from a previous global or credit reset .
6289 */
6290void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6291{
6292 /* leave shared count at zero for both global and VL15 */
6293 write_global_credit(dd, vau, vl15buf, 0);
6294
6295 /* We may need some credits for another VL when sending packets
6296 * with the snoop interface. Dividing it down the middle for VL15
6297 * and VL0 should suffice.
6298 */
6299 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6300 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6301 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6302 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6303 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6304 } else {
6305 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6306 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6307 }
6308}
6309
6310/*
6311 * Zero all credit details from the previous connection and
6312 * reset the CM manager's internal counters.
6313 */
6314void reset_link_credits(struct hfi1_devdata *dd)
6315{
6316 int i;
6317
6318 /* remove all previous VL credit limits */
6319 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006320 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006321 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6322 write_global_credit(dd, 0, 0, 0);
6323 /* reset the CM block */
6324 pio_send_control(dd, PSC_CM_RESET);
6325}
6326
6327/* convert a vCU to a CU */
6328static u32 vcu_to_cu(u8 vcu)
6329{
6330 return 1 << vcu;
6331}
6332
6333/* convert a CU to a vCU */
6334static u8 cu_to_vcu(u32 cu)
6335{
6336 return ilog2(cu);
6337}
6338
6339/* convert a vAU to an AU */
6340static u32 vau_to_au(u8 vau)
6341{
6342 return 8 * (1 << vau);
6343}
6344
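/*
 * Worked examples for the conversions above: vcu_to_cu(2) == 4 and
 * cu_to_vcu(4) == 2 (a plain power-of-two pair), while vau_to_au(0) == 8,
 * vau_to_au(1) == 16 and vau_to_au(3) == 64 bytes - the last being our
 * hardwired transmit vAU mentioned in handle_verify_cap() below.
 */
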
6345static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6346{
6347 ppd->sm_trap_qp = 0x0;
6348 ppd->sa_qp = 0x1;
6349}
6350
6351/*
6352 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6353 */
6354static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6355{
6356 u64 reg;
6357
6358 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6359 write_csr(dd, DC_LCB_CFG_RUN, 0);
6360 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6361 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006362 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006363 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6364 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6365 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006366 write_csr(dd, DCC_CFG_RESET, reg |
6367 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6368 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006369 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006370 if (!abort) {
6371 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6372 write_csr(dd, DCC_CFG_RESET, reg);
6373 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6374 }
6375}
6376
6377/*
6378 * This routine should be called after the link has been transitioned to
6379 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6380 * reset).
6381 *
6382 * The expectation is that the caller of this routine would have taken
6383 * care of properly transitioning the link into the correct state.
6384 */
6385static void dc_shutdown(struct hfi1_devdata *dd)
6386{
6387 unsigned long flags;
6388
6389 spin_lock_irqsave(&dd->dc8051_lock, flags);
6390 if (dd->dc_shutdown) {
6391 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6392 return;
6393 }
6394 dd->dc_shutdown = 1;
6395 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6396 /* Shutdown the LCB */
6397 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006398 /*
 6399 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006400 * SerDes into reset already. We only need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006401 * itself.
6402 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006403 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6404}
6405
Jubin John4d114fd2016-02-14 20:21:43 -08006406/*
6407 * Calling this after the DC has been brought out of reset should not
6408 * do any damage.
6409 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006410static void dc_start(struct hfi1_devdata *dd)
6411{
6412 unsigned long flags;
6413 int ret;
6414
6415 spin_lock_irqsave(&dd->dc8051_lock, flags);
6416 if (!dd->dc_shutdown)
6417 goto done;
6418 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6419 /* Take the 8051 out of reset */
6420 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6421 /* Wait until 8051 is ready */
6422 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6423 if (ret) {
6424 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006425 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006426 }
6427 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6428 write_csr(dd, DCC_CFG_RESET, 0x10);
6429 /* lcb_shutdown() with abort=1 does not restore these */
6430 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6431 spin_lock_irqsave(&dd->dc8051_lock, flags);
6432 dd->dc_shutdown = 0;
6433done:
6434 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6435}
6436
6437/*
6438 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6439 */
6440static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6441{
6442 u64 rx_radr, tx_radr;
6443 u32 version;
6444
6445 if (dd->icode != ICODE_FPGA_EMULATION)
6446 return;
6447
6448 /*
6449 * These LCB defaults on emulator _s are good, nothing to do here:
6450 * LCB_CFG_TX_FIFOS_RADR
6451 * LCB_CFG_RX_FIFOS_RADR
6452 * LCB_CFG_LN_DCLK
6453 * LCB_CFG_IGNORE_LOST_RCLK
6454 */
6455 if (is_emulator_s(dd))
6456 return;
6457 /* else this is _p */
6458
6459 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006460 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006461 version = 0x2d; /* all B0 use 0x2d or higher settings */
6462
6463 if (version <= 0x12) {
6464 /* release 0x12 and below */
6465
6466 /*
6467 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6468 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6469 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6470 */
6471 rx_radr =
6472 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6473 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6474 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6475 /*
6476 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6477 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6478 */
6479 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6480 } else if (version <= 0x18) {
6481 /* release 0x13 up to 0x18 */
6482 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6483 rx_radr =
6484 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6485 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6486 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6487 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6488 } else if (version == 0x19) {
6489 /* release 0x19 */
6490 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6491 rx_radr =
6492 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6493 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6494 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6495 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6496 } else if (version == 0x1a) {
6497 /* release 0x1a */
6498 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6499 rx_radr =
6500 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6501 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6502 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6503 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6504 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6505 } else {
6506 /* release 0x1b and higher */
6507 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6508 rx_radr =
6509 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6510 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6511 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6512 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6513 }
6514
6515 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6516 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6517 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006518 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006519 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6520}
6521
6522/*
6523 * Handle a SMA idle message
6524 *
6525 * This is a work-queue function outside of the interrupt.
6526 */
6527void handle_sma_message(struct work_struct *work)
6528{
6529 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6530 sma_message_work);
6531 struct hfi1_devdata *dd = ppd->dd;
6532 u64 msg;
6533 int ret;
6534
Jubin John4d114fd2016-02-14 20:21:43 -08006535 /*
6536 * msg is bytes 1-4 of the 40-bit idle message - the command code
6537 * is stripped off
6538 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006539 ret = read_idle_sma(dd, &msg);
6540 if (ret)
6541 return;
6542 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6543 /*
6544 * React to the SMA message. Byte[1] (0 for us) is the command.
6545 */
6546 switch (msg & 0xff) {
6547 case SMA_IDLE_ARM:
6548 /*
6549 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6550 * State Transitions
6551 *
6552 * Only expected in INIT or ARMED, discard otherwise.
6553 */
6554 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6555 ppd->neighbor_normal = 1;
6556 break;
6557 case SMA_IDLE_ACTIVE:
6558 /*
6559 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6560 * State Transitions
6561 *
6562 * Can activate the node. Discard otherwise.
6563 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006564 if (ppd->host_link_state == HLS_UP_ARMED &&
6565 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006566 ppd->neighbor_normal = 1;
6567 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6568 if (ret)
6569 dd_dev_err(
6570 dd,
6571 "%s: received Active SMA idle message, couldn't set link to Active\n",
6572 __func__);
6573 }
6574 break;
6575 default:
6576 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006577 "%s: received unexpected SMA idle message 0x%llx\n",
6578 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006579 break;
6580 }
6581}
6582
6583static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6584{
6585 u64 rcvctrl;
6586 unsigned long flags;
6587
6588 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6589 rcvctrl = read_csr(dd, RCV_CTRL);
6590 rcvctrl |= add;
6591 rcvctrl &= ~clear;
6592 write_csr(dd, RCV_CTRL, rcvctrl);
6593 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6594}
6595
6596static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6597{
6598 adjust_rcvctrl(dd, add, 0);
6599}
6600
6601static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6602{
6603 adjust_rcvctrl(dd, 0, clear);
6604}
6605
6606/*
6607 * Called from all interrupt handlers to start handling an SPC freeze.
6608 */
6609void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6610{
6611 struct hfi1_devdata *dd = ppd->dd;
6612 struct send_context *sc;
6613 int i;
6614
6615 if (flags & FREEZE_SELF)
6616 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6617
6618 /* enter frozen mode */
6619 dd->flags |= HFI1_FROZEN;
6620
6621 /* notify all SDMA engines that they are going into a freeze */
6622 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6623
6624 /* do halt pre-handling on all enabled send contexts */
6625 for (i = 0; i < dd->num_send_contexts; i++) {
6626 sc = dd->send_contexts[i].sc;
6627 if (sc && (sc->flags & SCF_ENABLED))
6628 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6629 }
6630
 6631 /* Send contexts are frozen. Notify user space */
6632 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6633
6634 if (flags & FREEZE_ABORT) {
6635 dd_dev_err(dd,
6636 "Aborted freeze recovery. Please REBOOT system\n");
6637 return;
6638 }
6639 /* queue non-interrupt handler */
6640 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6641}
6642
6643/*
6644 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6645 * depending on the "freeze" parameter.
6646 *
6647 * No need to return an error if it times out, our only option
6648 * is to proceed anyway.
6649 */
6650static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6651{
6652 unsigned long timeout;
6653 u64 reg;
6654
6655 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6656 while (1) {
6657 reg = read_csr(dd, CCE_STATUS);
6658 if (freeze) {
6659 /* waiting until all indicators are set */
6660 if ((reg & ALL_FROZE) == ALL_FROZE)
6661 return; /* all done */
6662 } else {
6663 /* waiting until all indicators are clear */
6664 if ((reg & ALL_FROZE) == 0)
6665 return; /* all done */
6666 }
6667
6668 if (time_after(jiffies, timeout)) {
6669 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006670 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6671 freeze ? "" : "un", reg & ALL_FROZE,
6672 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006673 return;
6674 }
6675 usleep_range(80, 120);
6676 }
6677}
6678
6679/*
6680 * Do all freeze handling for the RXE block.
6681 */
6682static void rxe_freeze(struct hfi1_devdata *dd)
6683{
6684 int i;
6685
6686 /* disable port */
6687 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6688
6689 /* disable all receive contexts */
6690 for (i = 0; i < dd->num_rcv_contexts; i++)
6691 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6692}
6693
6694/*
6695 * Unfreeze handling for the RXE block - kernel contexts only.
6696 * This will also enable the port. User contexts will do unfreeze
6697 * handling on a per-context basis as they call into the driver.
6698 *
6699 */
6700static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6701{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006702 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006703 int i;
6704
6705 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006706 for (i = 0; i < dd->n_krcv_queues; i++) {
6707 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6708 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6709 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6710 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6711 hfi1_rcvctrl(dd, rcvmask, i);
6712 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006713
6714 /* enable port */
6715 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6716}
6717
6718/*
6719 * Non-interrupt SPC freeze handling.
6720 *
6721 * This is a work-queue function outside of the triggering interrupt.
6722 */
6723void handle_freeze(struct work_struct *work)
6724{
6725 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6726 freeze_work);
6727 struct hfi1_devdata *dd = ppd->dd;
6728
6729 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006730 wait_for_freeze_status(dd, 1);
6731
6732 /* SPC is now frozen */
6733
6734 /* do send PIO freeze steps */
6735 pio_freeze(dd);
6736
6737 /* do send DMA freeze steps */
6738 sdma_freeze(dd);
6739
6740 /* do send egress freeze steps - nothing to do */
6741
6742 /* do receive freeze steps */
6743 rxe_freeze(dd);
6744
6745 /*
6746 * Unfreeze the hardware - clear the freeze, wait for each
6747 * block's frozen bit to clear, then clear the frozen flag.
6748 */
6749 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6750 wait_for_freeze_status(dd, 0);
6751
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006752 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006753 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6754 wait_for_freeze_status(dd, 1);
6755 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6756 wait_for_freeze_status(dd, 0);
6757 }
6758
6759 /* do send PIO unfreeze steps for kernel contexts */
6760 pio_kernel_unfreeze(dd);
6761
6762 /* do send DMA unfreeze steps */
6763 sdma_unfreeze(dd);
6764
6765 /* do send egress unfreeze steps - nothing to do */
6766
6767 /* do receive unfreeze steps for kernel contexts */
6768 rxe_kernel_unfreeze(dd);
6769
6770 /*
6771 * The unfreeze procedure touches global device registers when
6772 * it disables and re-enables RXE. Mark the device unfrozen
6773 * after all that is done so other parts of the driver waiting
6774 * for the device to unfreeze don't do things out of order.
6775 *
6776 * The above implies that the meaning of HFI1_FROZEN flag is
6777 * "Device has gone into freeze mode and freeze mode handling
6778 * is still in progress."
6779 *
6780 * The flag will be removed when freeze mode processing has
6781 * completed.
6782 */
6783 dd->flags &= ~HFI1_FROZEN;
6784 wake_up(&dd->event_queue);
6785
6786 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006787}
6788
6789/*
6790 * Handle a link up interrupt from the 8051.
6791 *
6792 * This is a work-queue function outside of the interrupt.
6793 */
6794void handle_link_up(struct work_struct *work)
6795{
6796 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006797 link_up_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006798 set_link_state(ppd, HLS_UP_INIT);
6799
6800 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6801 read_ltp_rtt(ppd->dd);
6802 /*
6803 * OPA specifies that certain counters are cleared on a transition
6804 * to link up, so do that.
6805 */
6806 clear_linkup_counters(ppd->dd);
6807 /*
6808 * And (re)set link up default values.
6809 */
6810 set_linkup_defaults(ppd);
6811
6812 /* enforce link speed enabled */
6813 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6814 /* oops - current speed is not enabled, bounce */
6815 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006816 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6817 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006818 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006819 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006820 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006821 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006822 start_link(ppd);
6823 }
6824}
6825
Jubin John4d114fd2016-02-14 20:21:43 -08006826/*
6827 * Several pieces of LNI information were cached for SMA in ppd.
6828 * Reset these on link down
6829 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006830static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6831{
6832 ppd->neighbor_guid = 0;
6833 ppd->neighbor_port_number = 0;
6834 ppd->neighbor_type = 0;
6835 ppd->neighbor_fm_security = 0;
6836}
6837
6838/*
6839 * Handle a link down interrupt from the 8051.
6840 *
6841 * This is a work-queue function outside of the interrupt.
6842 */
6843void handle_link_down(struct work_struct *work)
6844{
6845 u8 lcl_reason, neigh_reason = 0;
6846 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6847 link_down_work);
6848
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006849 if ((ppd->host_link_state &
6850 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6851 ppd->port_type == PORT_TYPE_FIXED)
6852 ppd->offline_disabled_reason =
6853 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6854
6855 /* Go offline first, then deal with reading/writing through 8051 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006856 set_link_state(ppd, HLS_DN_OFFLINE);
6857
6858 lcl_reason = 0;
6859 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6860
6861 /*
6862 * If no reason, assume peer-initiated but missed
6863 * LinkGoingDown idle flits.
6864 */
6865 if (neigh_reason == 0)
6866 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6867
6868 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6869
6870 reset_neighbor_info(ppd);
6871
6872 /* disable the port */
6873 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6874
Jubin John4d114fd2016-02-14 20:21:43 -08006875 /*
6876 * If there is no cable attached, turn the DC off. Otherwise,
6877 * start the link bring up.
6878 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006879 if (!qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006880 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006881 } else {
6882 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006883 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006884 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006885}
6886
6887void handle_link_bounce(struct work_struct *work)
6888{
6889 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6890 link_bounce_work);
6891
6892 /*
6893 * Only do something if the link is currently up.
6894 */
6895 if (ppd->host_link_state & HLS_UP) {
6896 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006897 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006898 start_link(ppd);
6899 } else {
6900 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006901 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006902 }
6903}
6904
6905/*
6906 * Mask conversion: Capability exchange to Port LTP. The capability
6907 * exchange has an implicit 16b CRC that is mandatory.
6908 */
6909static int cap_to_port_ltp(int cap)
6910{
6911 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6912
6913 if (cap & CAP_CRC_14B)
6914 port_ltp |= PORT_LTP_CRC_MODE_14;
6915 if (cap & CAP_CRC_48B)
6916 port_ltp |= PORT_LTP_CRC_MODE_48;
6917 if (cap & CAP_CRC_12B_16B_PER_LANE)
6918 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6919
6920 return port_ltp;
6921}
6922
6923/*
6924 * Convert an OPA Port LTP mask to capability mask
6925 */
6926int port_ltp_to_cap(int port_ltp)
6927{
6928 int cap_mask = 0;
6929
6930 if (port_ltp & PORT_LTP_CRC_MODE_14)
6931 cap_mask |= CAP_CRC_14B;
6932 if (port_ltp & PORT_LTP_CRC_MODE_48)
6933 cap_mask |= CAP_CRC_48B;
6934 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6935 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6936
6937 return cap_mask;
6938}
6939
6940/*
6941 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6942 */
6943static int lcb_to_port_ltp(int lcb_crc)
6944{
6945 int port_ltp = 0;
6946
6947 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6948 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6949 else if (lcb_crc == LCB_CRC_48B)
6950 port_ltp = PORT_LTP_CRC_MODE_48;
6951 else if (lcb_crc == LCB_CRC_14B)
6952 port_ltp = PORT_LTP_CRC_MODE_14;
6953 else
6954 port_ltp = PORT_LTP_CRC_MODE_16;
6955
6956 return port_ltp;
6957}
6958
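/*
 * Worked example for the three conversions above: a capability mask of
 * CAP_CRC_14B | CAP_CRC_48B maps to PORT_LTP_CRC_MODE_16 |
 * PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48 (the 16b mode is always
 * included); converting that back with port_ltp_to_cap() drops the
 * mandatory 16b mode again; and a negotiated LCB value of LCB_CRC_14B
 * maps to the single bit PORT_LTP_CRC_MODE_14.
 */
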
6959/*
6960 * Our neighbor has indicated that we are allowed to act as a fabric
6961 * manager, so place the full management partition key in the second
6962 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6963 * that we should already have the limited management partition key in
6964 * array element 1, and also that the port is not yet up when
6965 * add_full_mgmt_pkey() is invoked.
6966 */
6967static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6968{
6969 struct hfi1_devdata *dd = ppd->dd;
6970
Dean Luick87645222015-12-01 15:38:21 -05006971 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6972 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6973 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6974 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006975 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6976 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6977}
6978
6979/*
6980 * Convert the given link width to the OPA link width bitmask.
6981 */
6982static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6983{
6984 switch (width) {
6985 case 0:
6986 /*
6987 * Simulator and quick linkup do not set the width.
6988 * Just set it to 4x without complaint.
6989 */
6990 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6991 return OPA_LINK_WIDTH_4X;
6992 return 0; /* no lanes up */
6993 case 1: return OPA_LINK_WIDTH_1X;
6994 case 2: return OPA_LINK_WIDTH_2X;
6995 case 3: return OPA_LINK_WIDTH_3X;
6996 default:
6997 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006998 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006999 /* fall through */
7000 case 4: return OPA_LINK_WIDTH_4X;
7001 }
7002}
7003
7004/*
7005 * Do a population count on the bottom nibble.
7006 */
7007static const u8 bit_counts[16] = {
7008 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7009};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007010
Mike Marciniszyn77241052015-07-30 15:17:43 -04007011static inline u8 nibble_to_count(u8 nibble)
7012{
7013 return bit_counts[nibble & 0xf];
7014}
7015
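/*
 * Example: nibble_to_count(0xb) == 3 (binary 1011, three lanes enabled)
 * and nibble_to_count(0xf) == 4, i.e. all four lanes enabled.
 */
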
7016/*
7017 * Read the active lane information from the 8051 registers and return
7018 * their widths.
7019 *
7020 * Active lane information is found in these 8051 registers:
7021 * enable_lane_tx
7022 * enable_lane_rx
7023 */
7024static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7025 u16 *rx_width)
7026{
7027 u16 tx, rx;
7028 u8 enable_lane_rx;
7029 u8 enable_lane_tx;
7030 u8 tx_polarity_inversion;
7031 u8 rx_polarity_inversion;
7032 u8 max_rate;
7033
7034 /* read the active lanes */
7035 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007036 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007037 read_local_lni(dd, &enable_lane_rx);
7038
7039 /* convert to counts */
7040 tx = nibble_to_count(enable_lane_tx);
7041 rx = nibble_to_count(enable_lane_rx);
7042
7043 /*
7044 * Set link_speed_active here, overriding what was set in
7045 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7046 * set the max_rate field in handle_verify_cap until v0.19.
7047 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007048 if ((dd->icode == ICODE_RTL_SILICON) &&
7049 (dd->dc8051_ver < dc8051_ver(0, 19))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007050 /* max_rate: 0 = 12.5G, 1 = 25G */
7051 switch (max_rate) {
7052 case 0:
7053 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7054 break;
7055 default:
7056 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007057 "%s: unexpected max rate %d, using 25Gb\n",
7058 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007059 /* fall through */
7060 case 1:
7061 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7062 break;
7063 }
7064 }
7065
7066 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007067 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7068 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007069 *tx_width = link_width_to_bits(dd, tx);
7070 *rx_width = link_width_to_bits(dd, rx);
7071}
7072
7073/*
7074 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7075 * Valid after the end of VerifyCap and during LinkUp. Does not change
7076 * after link up. I.e. look elsewhere for downgrade information.
7077 *
7078 * Bits are:
7079 * + bits [7:4] contain the number of active transmitters
7080 * + bits [3:0] contain the number of active receivers
7081 * These are numbers 1 through 4 and can be different values if the
7082 * link is asymmetric.
7083 *
7084 * verify_cap_local_fm_link_width[0] retains its original value.
7085 */
7086static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7087 u16 *rx_width)
7088{
7089 u16 widths, tx, rx;
7090 u8 misc_bits, local_flags;
7091 u16 active_tx, active_rx;
7092
7093 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7094 tx = widths >> 12;
7095 rx = (widths >> 8) & 0xf;
7096
7097 *tx_width = link_width_to_bits(dd, tx);
7098 *rx_width = link_width_to_bits(dd, rx);
7099
7100 /* print the active widths */
7101 get_link_widths(dd, &active_tx, &active_rx);
7102}
7103
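/*
 * Worked example (hypothetical register value) for the decode above:
 * widths == 0x4400 gives tx = 4 and rx = 4, i.e. OPA_LINK_WIDTH_4X in
 * both directions; an asymmetric 0x4200 would give 4X transmit and
 * 2X receive.
 */
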
7104/*
7105 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7106 * hardware information when the link first comes up.
7107 *
7108 * The link width is not available until after VerifyCap.AllFramesReceived
7109 * (the trigger for handle_verify_cap), so this is outside that routine
7110 * and should be called when the 8051 signals linkup.
7111 */
7112void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7113{
7114 u16 tx_width, rx_width;
7115
7116 /* get end-of-LNI link widths */
7117 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7118
7119 /* use tx_width as the link is supposed to be symmetric on link up */
7120 ppd->link_width_active = tx_width;
7121 /* link width downgrade active (LWD.A) starts out matching LW.A */
7122 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7123 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7124 /* per OPA spec, on link up LWD.E resets to LWD.S */
7125 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
 7126 /* cache the active egress rate (units [10^6 bits/sec]) */
7127 ppd->current_egress_rate = active_egress_rate(ppd);
7128}
7129
7130/*
7131 * Handle a verify capabilities interrupt from the 8051.
7132 *
7133 * This is a work-queue function outside of the interrupt.
7134 */
7135void handle_verify_cap(struct work_struct *work)
7136{
7137 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7138 link_vc_work);
7139 struct hfi1_devdata *dd = ppd->dd;
7140 u64 reg;
7141 u8 power_management;
 7142 u8 continuous;
7143 u8 vcu;
7144 u8 vau;
7145 u8 z;
7146 u16 vl15buf;
7147 u16 link_widths;
7148 u16 crc_mask;
7149 u16 crc_val;
7150 u16 device_id;
7151 u16 active_tx, active_rx;
7152 u8 partner_supported_crc;
7153 u8 remote_tx_rate;
7154 u8 device_rev;
7155
7156 set_link_state(ppd, HLS_VERIFY_CAP);
7157
7158 lcb_shutdown(dd, 0);
7159 adjust_lcb_for_fpga_serdes(dd);
7160
7161 /*
7162 * These are now valid:
7163 * remote VerifyCap fields in the general LNI config
7164 * CSR DC8051_STS_REMOTE_GUID
7165 * CSR DC8051_STS_REMOTE_NODE_TYPE
7166 * CSR DC8051_STS_REMOTE_FM_SECURITY
7167 * CSR DC8051_STS_REMOTE_PORT_NO
7168 */
7169
 7170 read_vc_remote_phy(dd, &power_management, &continuous);
Jubin John17fb4f22016-02-14 20:21:52 -08007171 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7172 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007173 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7174 read_remote_device_id(dd, &device_id, &device_rev);
7175 /*
7176 * And the 'MgmtAllowed' information, which is exchanged during
 7177 * LNI, is also available at this point.
7178 */
7179 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7180 /* print the active widths */
7181 get_link_widths(dd, &active_tx, &active_rx);
7182 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007183 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
 7184 (int)power_management, (int)continuous);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007185 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007186 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7187 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7188 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007189 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007190 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007191 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007192 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007193 /*
7194 * The peer vAU value just read is the peer receiver value. HFI does
7195 * not support a transmit vAU of 0 (AU == 8). We advertised that
7196 * with Z=1 in the fabric capabilities sent to the peer. The peer
7197 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7198 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7199 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7200 * subject to the Z value exception.
7201 */
7202 if (vau == 0)
7203 vau = 1;
7204 set_up_vl15(dd, vau, vl15buf);
7205
7206 /* set up the LCB CRC mode */
7207 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7208
7209 /* order is important: use the lowest bit in common */
7210 if (crc_mask & CAP_CRC_14B)
7211 crc_val = LCB_CRC_14B;
7212 else if (crc_mask & CAP_CRC_48B)
7213 crc_val = LCB_CRC_48B;
7214 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7215 crc_val = LCB_CRC_12B_16B_PER_LANE;
7216 else
7217 crc_val = LCB_CRC_16B;
7218
7219 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7220 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7221 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7222
7223 /* set (14b only) or clear sideband credit */
7224 reg = read_csr(dd, SEND_CM_CTRL);
7225 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7226 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007227 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007228 } else {
7229 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007230 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007231 }
7232
7233 ppd->link_speed_active = 0; /* invalid value */
7234 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7235 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7236 switch (remote_tx_rate) {
7237 case 0:
7238 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7239 break;
7240 case 1:
7241 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7242 break;
7243 }
7244 } else {
7245 /* actual rate is highest bit of the ANDed rates */
7246 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7247
7248 if (rate & 2)
7249 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7250 else if (rate & 1)
7251 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7252 }
7253 if (ppd->link_speed_active == 0) {
7254 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007255 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007256 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7257 }
7258
7259 /*
7260 * Cache the values of the supported, enabled, and active
7261 * LTP CRC modes to return in 'portinfo' queries. But the bit
7262 * flags that are returned in the portinfo query differ from
7263 * what's in the link_crc_mask, crc_sizes, and crc_val
7264 * variables. Convert these here.
7265 */
7266 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7267 /* supported crc modes */
7268 ppd->port_ltp_crc_mode |=
7269 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7270 /* enabled crc modes */
7271 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7272 /* active crc mode */
7273
7274 /* set up the remote credit return table */
7275 assign_remote_cm_au_table(dd, vcu);
7276
7277 /*
7278 * The LCB is reset on entry to handle_verify_cap(), so this must
7279 * be applied on every link up.
7280 *
7281 * Adjust LCB error kill enable to kill the link if
7282 * these RBUF errors are seen:
7283 * REPLAY_BUF_MBE_SMASK
7284 * FLIT_INPUT_BUF_MBE_SMASK
7285 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007286 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007287 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7288 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7289 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7290 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7291 }
7292
7293 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7294 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7295
7296 /* give 8051 access to the LCB CSRs */
7297 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7298 set_8051_lcb_access(dd);
7299
7300 ppd->neighbor_guid =
7301 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7302 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7303 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7304 ppd->neighbor_type =
7305 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7306 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7307 ppd->neighbor_fm_security =
7308 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7309 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7310 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007311 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7312 ppd->neighbor_guid, ppd->neighbor_type,
7313 ppd->mgmt_allowed, ppd->neighbor_fm_security);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007314 if (ppd->mgmt_allowed)
7315 add_full_mgmt_pkey(ppd);
7316
7317 /* tell the 8051 to go to LinkUp */
7318 set_link_state(ppd, HLS_GOING_UP);
7319}
7320
7321/*
7322 * Apply the link width downgrade enabled policy against the current active
7323 * link widths.
7324 *
7325 * Called when the enabled policy changes or the active link widths change.
7326 */
7327void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7328{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007329 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007330 int tries;
7331 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007332 u16 tx, rx;
7333
Dean Luick323fd782015-11-16 21:59:24 -05007334 /* use the hls lock to avoid a race with actual link up */
7335 tries = 0;
7336retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007337 mutex_lock(&ppd->hls_lock);
7338 /* only apply if the link is up */
Dean Luick323fd782015-11-16 21:59:24 -05007339 if (!(ppd->host_link_state & HLS_UP)) {
7340 /* still going up..wait and retry */
7341 if (ppd->host_link_state & HLS_GOING_UP) {
7342 if (++tries < 1000) {
7343 mutex_unlock(&ppd->hls_lock);
7344 usleep_range(100, 120); /* arbitrary */
7345 goto retry;
7346 }
7347 dd_dev_err(ppd->dd,
7348 "%s: giving up waiting for link state change\n",
7349 __func__);
7350 }
7351 goto done;
7352 }
7353
7354 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007355
7356 if (refresh_widths) {
7357 get_link_widths(ppd->dd, &tx, &rx);
7358 ppd->link_width_downgrade_tx_active = tx;
7359 ppd->link_width_downgrade_rx_active = rx;
7360 }
7361
7362 if (lwde == 0) {
7363 /* downgrade is disabled */
7364
7365 /* bounce if not at starting active width */
Jubin John17fb4f22016-02-14 20:21:52 -08007366 if ((ppd->link_width_active !=
7367 ppd->link_width_downgrade_tx_active) ||
7368 (ppd->link_width_active !=
7369 ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007370 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007371 "Link downgrade is disabled and link has downgraded, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007372 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007373 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7374 ppd->link_width_active,
7375 ppd->link_width_downgrade_tx_active,
7376 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007377 do_bounce = 1;
7378 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007379 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7380 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007381 /* Tx or Rx is outside the enabled policy */
7382 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007383 "Link is outside of downgrade allowed, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007384 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007385 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7386 lwde, ppd->link_width_downgrade_tx_active,
7387 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007388 do_bounce = 1;
7389 }
7390
Dean Luick323fd782015-11-16 21:59:24 -05007391done:
7392 mutex_unlock(&ppd->hls_lock);
7393
Mike Marciniszyn77241052015-07-30 15:17:43 -04007394 if (do_bounce) {
7395 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08007396 OPA_LINKDOWN_REASON_WIDTH_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007397 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007398 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007399 start_link(ppd);
7400 }
7401}
7402
7403/*
7404 * Handle a link downgrade interrupt from the 8051.
7405 *
7406 * This is a work-queue function outside of the interrupt.
7407 */
7408void handle_link_downgrade(struct work_struct *work)
7409{
7410 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7411 link_downgrade_work);
7412
7413 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7414 apply_link_downgrade_policy(ppd, 1);
7415}
7416
7417static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7418{
7419 return flag_string(buf, buf_len, flags, dcc_err_flags,
7420 ARRAY_SIZE(dcc_err_flags));
7421}
7422
7423static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7424{
7425 return flag_string(buf, buf_len, flags, lcb_err_flags,
7426 ARRAY_SIZE(lcb_err_flags));
7427}
7428
7429static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7430{
7431 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7432 ARRAY_SIZE(dc8051_err_flags));
7433}
7434
7435static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7436{
7437 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7438 ARRAY_SIZE(dc8051_info_err_flags));
7439}
7440
7441static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7442{
7443 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7444 ARRAY_SIZE(dc8051_info_host_msg_flags));
7445}
7446
7447static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7448{
7449 struct hfi1_pportdata *ppd = dd->pport;
7450 u64 info, err, host_msg;
7451 int queue_link_down = 0;
7452 char buf[96];
7453
7454 /* look at the flags */
7455 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7456 /* 8051 information set by firmware */
7457 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7458 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7459 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7460 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7461 host_msg = (info >>
7462 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7463 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7464
7465 /*
7466 * Handle error flags.
7467 */
7468 if (err & FAILED_LNI) {
7469 /*
7470 * LNI error indications are cleared by the 8051
7471 * only when starting polling. Only pay attention
7472 * to them when in the states that occur during
7473 * LNI.
7474 */
7475 if (ppd->host_link_state
7476 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7477 queue_link_down = 1;
7478 dd_dev_info(dd, "Link error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007479 dc8051_info_err_string(buf,
7480 sizeof(buf),
7481 err &
7482 FAILED_LNI));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007483 }
7484 err &= ~(u64)FAILED_LNI;
7485 }
Dean Luick6d014532015-12-01 15:38:23 -05007486 /* unknown frames can happen during LNI, just count */
7487 if (err & UNKNOWN_FRAME) {
7488 ppd->unknown_frame_count++;
7489 err &= ~(u64)UNKNOWN_FRAME;
7490 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007491 if (err) {
7492 /* report remaining errors, but do not do anything */
7493 dd_dev_err(dd, "8051 info error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007494 dc8051_info_err_string(buf, sizeof(buf),
7495 err));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007496 }
7497
7498 /*
7499 * Handle host message flags.
7500 */
7501 if (host_msg & HOST_REQ_DONE) {
7502 /*
7503 * Presently, the driver does a busy wait for
7504 * host requests to complete. This is only an
7505 * informational message.
7506 * NOTE: The 8051 clears the host message
7507 * information *on the next 8051 command*.
7508 * Therefore, when linkup is achieved,
7509 * this flag will still be set.
7510 */
7511 host_msg &= ~(u64)HOST_REQ_DONE;
7512 }
7513 if (host_msg & BC_SMA_MSG) {
7514 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7515 host_msg &= ~(u64)BC_SMA_MSG;
7516 }
7517 if (host_msg & LINKUP_ACHIEVED) {
7518 dd_dev_info(dd, "8051: Link up\n");
7519 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7520 host_msg &= ~(u64)LINKUP_ACHIEVED;
7521 }
7522 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharancbac3862016-02-03 14:31:31 -08007523 queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007524 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7525 }
7526 if (host_msg & VERIFY_CAP_FRAME) {
7527 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7528 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7529 }
7530 if (host_msg & LINK_GOING_DOWN) {
7531 const char *extra = "";
7532 /* no downgrade action needed if going down */
7533 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7534 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7535 extra = " (ignoring downgrade)";
7536 }
7537 dd_dev_info(dd, "8051: Link down%s\n", extra);
7538 queue_link_down = 1;
7539 host_msg &= ~(u64)LINK_GOING_DOWN;
7540 }
7541 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7542 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7543 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7544 }
7545 if (host_msg) {
7546 /* report remaining messages, but do not do anything */
7547 dd_dev_info(dd, "8051 info host message: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007548 dc8051_info_host_msg_string(buf,
7549 sizeof(buf),
7550 host_msg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007551 }
7552
7553 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7554 }
7555 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7556 /*
7557 * Lost the 8051 heartbeat. If this happens, we
7558 * receive constant interrupts about it. Disable
7559 * the interrupt after the first.
7560 */
7561 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7562 write_csr(dd, DC_DC8051_ERR_EN,
Jubin John17fb4f22016-02-14 20:21:52 -08007563 read_csr(dd, DC_DC8051_ERR_EN) &
7564 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007565
7566 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7567 }
7568 if (reg) {
7569 /* report the error, but do not do anything */
7570 dd_dev_err(dd, "8051 error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007571 dc8051_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007572 }
7573
7574 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007575 /*
7576 * if the link is already going down or disabled, do not
7577 * queue another
7578 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007579 if ((ppd->host_link_state &
7580 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7581 ppd->link_enabled == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007582 dd_dev_info(dd, "%s: not queuing link down\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007583 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007584 } else {
7585 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7586 }
7587 }
7588}
7589
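/*
 * Text for the FM config error codes read from DCC_ERR_INFO_FMCONFIG;
 * indexed by that code in handle_dcc_err() below.
 */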
7590static const char * const fm_config_txt[] = {
7591[0] =
7592 "BadHeadDist: Distance violation between two head flits",
7593[1] =
7594 "BadTailDist: Distance violation between two tail flits",
7595[2] =
7596 "BadCtrlDist: Distance violation between two credit control flits",
7597[3] =
7598 "BadCrdAck: Credits return for unsupported VL",
7599[4] =
7600 "UnsupportedVLMarker: Received VL Marker",
7601[5] =
7602 "BadPreempt: Exceeded the preemption nesting level",
7603[6] =
7604 "BadControlFlit: Received unsupported control flit",
7605/* no 7 */
7606[8] =
7607 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7608};
7609
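/*
 * Text for the port receive error codes read from DCC_ERR_INFO_PORTRCV;
 * indexed the same way in handle_dcc_err() below.
 */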
7610static const char * const port_rcv_txt[] = {
7611[1] =
7612 "BadPktLen: Illegal PktLen",
7613[2] =
7614 "PktLenTooLong: Packet longer than PktLen",
7615[3] =
7616 "PktLenTooShort: Packet shorter than PktLen",
7617[4] =
7618 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7619[5] =
7620 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7621[6] =
7622 "BadL2: Illegal L2 opcode",
7623[7] =
7624 "BadSC: Unsupported SC",
7625[9] =
7626 "BadRC: Illegal RC",
7627[11] =
7628 "PreemptError: Preempting with same VL",
7629[12] =
7630 "PreemptVL15: Preempting a VL15 packet",
7631};
7632
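/*
 * Bit offsets of the FMConfig and PortRcv error code groups within the
 * port_error_action mask; handle_dcc_err() tests bit (offset + code) to
 * decide whether an error should bounce the link.
 */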
7633#define OPA_LDR_FMCONFIG_OFFSET 16
7634#define OPA_LDR_PORTRCV_OFFSET 0
7635static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7636{
7637 u64 info, hdr0, hdr1;
7638 const char *extra;
7639 char buf[96];
7640 struct hfi1_pportdata *ppd = dd->pport;
7641 u8 lcl_reason = 0;
7642 int do_bounce = 0;
7643
7644 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7645 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7646 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7647 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7648 /* set status bit */
7649 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7650 }
7651 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7652 }
7653
7654 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7655 struct hfi1_pportdata *ppd = dd->pport;
7656 /* this counter saturates at (2^32) - 1 */
7657 if (ppd->link_downed < (u32)UINT_MAX)
7658 ppd->link_downed++;
7659 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7660 }
7661
7662 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7663 u8 reason_valid = 1;
7664
7665 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7666 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7667 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7668 /* set status bit */
7669 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7670 }
7671 switch (info) {
7672 case 0:
7673 case 1:
7674 case 2:
7675 case 3:
7676 case 4:
7677 case 5:
7678 case 6:
7679 extra = fm_config_txt[info];
7680 break;
7681 case 8:
7682 extra = fm_config_txt[info];
7683 if (ppd->port_error_action &
7684 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7685 do_bounce = 1;
7686 /*
7687 * lcl_reason cannot be derived from info
7688 * for this error
7689 */
7690 lcl_reason =
7691 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7692 }
7693 break;
7694 default:
7695 reason_valid = 0;
7696 snprintf(buf, sizeof(buf), "reserved%lld", info);
7697 extra = buf;
7698 break;
7699 }
7700
7701 if (reason_valid && !do_bounce) {
7702 do_bounce = ppd->port_error_action &
7703 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7704 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7705 }
7706
7707 /* just report this */
7708 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7709 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7710 }
7711
7712 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7713 u8 reason_valid = 1;
7714
7715 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7716 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7717 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7718 if (!(dd->err_info_rcvport.status_and_code &
7719 OPA_EI_STATUS_SMASK)) {
7720 dd->err_info_rcvport.status_and_code =
7721 info & OPA_EI_CODE_SMASK;
7722 /* set status bit */
7723 dd->err_info_rcvport.status_and_code |=
7724 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007725 /*
7726 * save first 2 flits in the packet that caused
7727 * the error
7728 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007729 dd->err_info_rcvport.packet_flit1 = hdr0;
7730 dd->err_info_rcvport.packet_flit2 = hdr1;
7731 }
7732 switch (info) {
7733 case 1:
7734 case 2:
7735 case 3:
7736 case 4:
7737 case 5:
7738 case 6:
7739 case 7:
7740 case 9:
7741 case 11:
7742 case 12:
7743 extra = port_rcv_txt[info];
7744 break;
7745 default:
7746 reason_valid = 0;
7747 snprintf(buf, sizeof(buf), "reserved%lld", info);
7748 extra = buf;
7749 break;
7750 }
7751
7752 if (reason_valid && !do_bounce) {
7753 do_bounce = ppd->port_error_action &
7754 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7755 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7756 }
7757
7758 /* just report this */
7759 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7760 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007761 hdr0, hdr1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007762
7763 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7764 }
7765
7766 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7767 /* informative only */
7768 dd_dev_info(dd, "8051 access to LCB blocked\n");
7769 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7770 }
7771 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7772 /* informative only */
7773 dd_dev_info(dd, "host access to LCB blocked\n");
7774 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7775 }
7776
7777 /* report any remaining errors */
7778 if (reg)
7779 dd_dev_info(dd, "DCC Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007780 dcc_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007781
7782 if (lcl_reason == 0)
7783 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7784
7785 if (do_bounce) {
7786 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7787 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7788 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7789 }
7790}
7791
7792static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7793{
7794 char buf[96];
7795
7796 dd_dev_info(dd, "LCB Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007797 lcb_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007798}
7799
7800/*
7801 * CCE block DC interrupt. Source is < 8.
7802 */
7803static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7804{
7805 const struct err_reg_info *eri = &dc_errs[source];
7806
7807 if (eri->handler) {
7808 interrupt_clear_down(dd, 0, eri);
7809 } else if (source == 3 /* dc_lbm_int */) {
7810 /*
7811 * This indicates that a parity error has occurred on the
7812 * address/control lines presented to the LBM. The error
7813 * is a single pulse, there is no associated error flag,
7814 * and it is non-maskable. This is because if a parity
7815	 * error occurs on the request, the request is dropped.
7816 * This should never occur, but it is nice to know if it
7817 * ever does.
7818 */
7819 dd_dev_err(dd, "Parity error in DC LBM block\n");
7820 } else {
7821 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7822 }
7823}
7824
7825/*
7826 * TX block send credit interrupt. Source is < 160.
7827 */
7828static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7829{
7830 sc_group_release_update(dd, source);
7831}
7832
7833/*
7834 * TX block SDMA interrupt. Source is < 48.
7835 *
7836 * SDMA interrupts are grouped by type:
7837 *
7838 * 0 - N-1 = SDma
7839 * N - 2N-1 = SDmaProgress
7840 * 2N - 3N-1 = SDmaIdle
7841 */
7842static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7843{
7844 /* what interrupt */
7845 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7846 /* which engine */
7847 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7848
7849#ifdef CONFIG_SDMA_VERBOSITY
7850 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7851 slashstrip(__FILE__), __LINE__, __func__);
7852 sdma_dumpstate(&dd->per_sdma[which]);
7853#endif
7854
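	/*
	 * 'what' values 0, 1 and 2 correspond to the SDma, SDmaProgress
	 * and SDmaIdle groups described above.
	 */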
7855 if (likely(what < 3 && which < dd->num_sdma)) {
7856 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7857 } else {
7858 /* should not happen */
7859 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7860 }
7861}
7862
7863/*
7864 * RX block receive available interrupt. Source is < 160.
7865 */
7866static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7867{
7868 struct hfi1_ctxtdata *rcd;
7869 char *err_detail;
7870
7871 if (likely(source < dd->num_rcv_contexts)) {
7872 rcd = dd->rcd[source];
7873 if (rcd) {
7874 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007875 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007876 else
7877 handle_user_interrupt(rcd);
7878 return; /* OK */
7879 }
7880 /* received an interrupt, but no rcd */
7881 err_detail = "dataless";
7882 } else {
7883 /* received an interrupt, but are not using that context */
7884 err_detail = "out of range";
7885 }
7886 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007887 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007888}
7889
7890/*
7891 * RX block receive urgent interrupt. Source is < 160.
7892 */
7893static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7894{
7895 struct hfi1_ctxtdata *rcd;
7896 char *err_detail;
7897
7898 if (likely(source < dd->num_rcv_contexts)) {
7899 rcd = dd->rcd[source];
7900 if (rcd) {
7901 /* only pay attention to user urgent interrupts */
7902 if (source >= dd->first_user_ctxt)
7903 handle_user_interrupt(rcd);
7904 return; /* OK */
7905 }
7906 /* received an interrupt, but no rcd */
7907 err_detail = "dataless";
7908 } else {
7909 /* received an interrupt, but are not using that context */
7910 err_detail = "out of range";
7911 }
7912 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007913 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007914}
7915
7916/*
7917 * Reserved range interrupt. Should not be called in normal operation.
7918 */
7919static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7920{
7921 char name[64];
7922
7923 dd_dev_err(dd, "unexpected %s interrupt\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007924 is_reserved_name(name, sizeof(name), source));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007925}
7926
7927static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08007928/*
7929 * start end
7930 * name func interrupt func
7931 */
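/* entries must stay sorted by range: is_interrupt() walks this table in order */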
Mike Marciniszyn77241052015-07-30 15:17:43 -04007932{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7933 is_misc_err_name, is_misc_err_int },
7934{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7935 is_sdma_eng_err_name, is_sdma_eng_err_int },
7936{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7937 is_sendctxt_err_name, is_sendctxt_err_int },
7938{ IS_SDMA_START, IS_SDMA_END,
7939 is_sdma_eng_name, is_sdma_eng_int },
7940{ IS_VARIOUS_START, IS_VARIOUS_END,
7941 is_various_name, is_various_int },
7942{ IS_DC_START, IS_DC_END,
7943 is_dc_name, is_dc_int },
7944{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7945 is_rcv_avail_name, is_rcv_avail_int },
7946{ IS_RCVURGENT_START, IS_RCVURGENT_END,
7947 is_rcv_urgent_name, is_rcv_urgent_int },
7948{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7949 is_send_credit_name, is_send_credit_int},
7950{ IS_RESERVED_START, IS_RESERVED_END,
7951 is_reserved_name, is_reserved_int},
7952};
7953
7954/*
7955 * Interrupt source interrupt - called when the given source has an interrupt.
7956 * Source is a bit index into an array of 64-bit integers.
7957 */
7958static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7959{
7960 const struct is_table *entry;
7961
7962 /* avoids a double compare by walking the table in-order */
7963 for (entry = &is_table[0]; entry->is_name; entry++) {
7964 if (source < entry->end) {
7965 trace_hfi1_interrupt(dd, entry, source);
7966 entry->is_int(dd, source - entry->start);
7967 return;
7968 }
7969 }
7970 /* fell off the end */
7971 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7972}
7973
7974/*
7975 * General interrupt handler. This is able to correctly handle
7976 * all interrupts in case INTx is used.
7977 */
7978static irqreturn_t general_interrupt(int irq, void *data)
7979{
7980 struct hfi1_devdata *dd = data;
7981 u64 regs[CCE_NUM_INT_CSRS];
7982 u32 bit;
7983 int i;
7984
7985 this_cpu_inc(*dd->int_counter);
7986
7987 /* phase 1: scan and clear all handled interrupts */
7988 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7989 if (dd->gi_mask[i] == 0) {
7990 regs[i] = 0; /* used later */
7991 continue;
7992 }
7993 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7994 dd->gi_mask[i];
7995 /* only clear if anything is set */
7996 if (regs[i])
7997 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7998 }
7999
8000 /* phase 2: call the appropriate handler */
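	/*
	 * Treat the saved status words as one bitmap of
	 * CCE_NUM_INT_CSRS * 64 bits.
	 */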
8001 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John17fb4f22016-02-14 20:21:52 -08008002 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008003 is_interrupt(dd, bit);
8004 }
8005
8006 return IRQ_HANDLED;
8007}
8008
8009static irqreturn_t sdma_interrupt(int irq, void *data)
8010{
8011 struct sdma_engine *sde = data;
8012 struct hfi1_devdata *dd = sde->dd;
8013 u64 status;
8014
8015#ifdef CONFIG_SDMA_VERBOSITY
8016 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8017 slashstrip(__FILE__), __LINE__, __func__);
8018 sdma_dumpstate(sde);
8019#endif
8020
8021 this_cpu_inc(*dd->int_counter);
8022
8023 /* This read_csr is really bad in the hot path */
8024 status = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008025 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8026 & sde->imask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008027 if (likely(status)) {
8028 /* clear the interrupt(s) */
8029 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008030 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8031 status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008032
8033 /* handle the interrupt(s) */
8034 sdma_engine_interrupt(sde, status);
8035 } else
8036 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008037 sde->this_idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008038
8039 return IRQ_HANDLED;
8040}
8041
8042/*
Dean Luickecd42f82016-02-03 14:35:14 -08008043 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8044	 * to ensure that the write completed. This does NOT guarantee that
8045 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008046 */
8047static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8048{
8049 struct hfi1_devdata *dd = rcd->dd;
8050 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8051
8052 mmiowb(); /* make sure everything before is written */
8053 write_csr(dd, addr, rcd->imask);
8054 /* force the above write on the chip and get a value back */
8055 (void)read_csr(dd, addr);
8056}
8057
8058/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008059void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008060{
8061 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8062}
8063
Dean Luickecd42f82016-02-03 14:35:14 -08008064/*
8065 * Return non-zero if a packet is present.
8066 *
8067 * This routine is called when rechecking for packets after the RcvAvail
8068 * interrupt has been cleared down. First, do a quick check of memory for
8069 * a packet present. If not found, use an expensive CSR read of the context
8070 * tail to determine the actual tail. The CSR read is necessary because there
8071 * is no method to push pending DMAs to memory other than an interrupt and we
8072 * are trying to determine if we need to force an interrupt.
8073 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008074static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8075{
Dean Luickecd42f82016-02-03 14:35:14 -08008076 u32 tail;
8077 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008078
Dean Luickecd42f82016-02-03 14:35:14 -08008079 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8080 present = (rcd->seq_cnt ==
8081 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8082 else /* is RDMA rtail */
8083 present = (rcd->head != get_rcvhdrtail(rcd));
8084
8085 if (present)
8086 return 1;
8087
8088	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8089 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8090 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008091}
8092
8093/*
8094 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8095 * This routine will try to handle packets immediately (latency), but if
8096	 * it finds too many, it will invoke the thread handler (bandwidth). The
Jubin John16733b82016-02-14 20:20:58 -08008097 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008098 * invoked) is finished. The intent is to avoid extra interrupts while we
8099 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008100 */
8101static irqreturn_t receive_context_interrupt(int irq, void *data)
8102{
8103 struct hfi1_ctxtdata *rcd = data;
8104 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008105 int disposition;
8106 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008107
8108 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8109 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008110 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008111
Dean Luickf4f30031c2015-10-26 10:28:44 -04008112 /* receive interrupt remains blocked while processing packets */
8113 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008114
Dean Luickf4f30031c2015-10-26 10:28:44 -04008115 /*
8116 * Too many packets were seen while processing packets in this
8117 * IRQ handler. Invoke the handler thread. The receive interrupt
8118 * remains blocked.
8119 */
8120 if (disposition == RCV_PKT_LIMIT)
8121 return IRQ_WAKE_THREAD;
8122
8123 /*
8124 * The packet processor detected no more packets. Clear the receive
8125	 * interrupt and recheck for a packet that may have arrived
8126 * after the previous check and interrupt clear. If a packet arrived,
8127 * force another interrupt.
8128 */
8129 clear_recv_intr(rcd);
8130 present = check_packet_present(rcd);
8131 if (present)
8132 force_recv_intr(rcd);
8133
8134 return IRQ_HANDLED;
8135}
8136
8137/*
8138 * Receive packet thread handler. This expects to be invoked with the
8139 * receive interrupt still blocked.
8140 */
8141static irqreturn_t receive_context_thread(int irq, void *data)
8142{
8143 struct hfi1_ctxtdata *rcd = data;
8144 int present;
8145
8146 /* receive interrupt is still blocked from the IRQ handler */
8147 (void)rcd->do_interrupt(rcd, 1);
8148
8149 /*
8150 * The packet processor will only return if it detected no more
8151 * packets. Hold IRQs here so we can safely clear the interrupt and
8152 * recheck for a packet that may have arrived after the previous
8153 * check and the interrupt clear. If a packet arrived, force another
8154 * interrupt.
8155 */
8156 local_irq_disable();
8157 clear_recv_intr(rcd);
8158 present = check_packet_present(rcd);
8159 if (present)
8160 force_recv_intr(rcd);
8161 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008162
8163 return IRQ_HANDLED;
8164}
8165
8166/* ========================================================================= */
8167
8168u32 read_physical_state(struct hfi1_devdata *dd)
8169{
8170 u64 reg;
8171
8172 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8173 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8174 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8175}
8176
Jim Snowfb9036d2016-01-11 18:32:21 -05008177u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008178{
8179 u64 reg;
8180
8181 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8182 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8183 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8184}
8185
8186static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8187{
8188 u64 reg;
8189
8190 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8191 /* clear current state, set new state */
8192 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8193 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8194 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8195}
8196
8197/*
8198 * Use the 8051 to read a LCB CSR.
8199 */
8200static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8201{
8202 u32 regno;
8203 int ret;
8204
8205 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8206 if (acquire_lcb_access(dd, 0) == 0) {
8207 *data = read_csr(dd, addr);
8208 release_lcb_access(dd, 0);
8209 return 0;
8210 }
8211 return -EBUSY;
8212 }
8213
8214 /* register is an index of LCB registers: (offset - base) / 8 */
8215 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8216 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8217 if (ret != HCMD_SUCCESS)
8218 return -EBUSY;
8219 return 0;
8220}
8221
8222/*
8223 * Read an LCB CSR. Access may not be in host control, so check.
8224 * Return 0 on success, -EBUSY on failure.
8225 */
8226int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8227{
8228 struct hfi1_pportdata *ppd = dd->pport;
8229
8230 /* if up, go through the 8051 for the value */
8231 if (ppd->host_link_state & HLS_UP)
8232 return read_lcb_via_8051(dd, addr, data);
8233 /* if going up or down, no access */
8234 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8235 return -EBUSY;
8236 /* otherwise, host has access */
8237 *data = read_csr(dd, addr);
8238 return 0;
8239}
8240
8241/*
8242 * Use the 8051 to write a LCB CSR.
8243 */
8244static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8245{
Dean Luick3bf40d62015-11-06 20:07:04 -05008246 u32 regno;
8247 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008248
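	/*
	 * For the simulator, or 8051 firmware older than version 0.20,
	 * write the CSR directly while holding host LCB access; the
	 * HCMD_WRITE_LCB_CSR command is only used with newer firmware.
	 */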
Dean Luick3bf40d62015-11-06 20:07:04 -05008249 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8250 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8251 if (acquire_lcb_access(dd, 0) == 0) {
8252 write_csr(dd, addr, data);
8253 release_lcb_access(dd, 0);
8254 return 0;
8255 }
8256 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008257 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008258
8259 /* register is an index of LCB registers: (offset - base) / 8 */
8260 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8261 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8262 if (ret != HCMD_SUCCESS)
8263 return -EBUSY;
8264 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008265}
8266
8267/*
8268 * Write an LCB CSR. Access may not be in host control, so check.
8269 * Return 0 on success, -EBUSY on failure.
8270 */
8271int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8272{
8273 struct hfi1_pportdata *ppd = dd->pport;
8274
8275 /* if up, go through the 8051 for the value */
8276 if (ppd->host_link_state & HLS_UP)
8277 return write_lcb_via_8051(dd, addr, data);
8278 /* if going up or down, no access */
8279 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8280 return -EBUSY;
8281 /* otherwise, host has access */
8282 write_csr(dd, addr, data);
8283 return 0;
8284}
8285
8286/*
8287 * Returns:
8288 * < 0 = Linux error, not able to get access
8289 * > 0 = 8051 command RETURN_CODE
8290 */
8291static int do_8051_command(
8292 struct hfi1_devdata *dd,
8293 u32 type,
8294 u64 in_data,
8295 u64 *out_data)
8296{
8297 u64 reg, completed;
8298 int return_code;
8299 unsigned long flags;
8300 unsigned long timeout;
8301
8302 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8303
8304 /*
8305 * Alternative to holding the lock for a long time:
8306 * - keep busy wait - have other users bounce off
8307 */
8308 spin_lock_irqsave(&dd->dc8051_lock, flags);
8309
8310 /* We can't send any commands to the 8051 if it's in reset */
8311 if (dd->dc_shutdown) {
8312 return_code = -ENODEV;
8313 goto fail;
8314 }
8315
8316 /*
8317 * If an 8051 host command timed out previously, then the 8051 is
8318 * stuck.
8319 *
8320 * On first timeout, attempt to reset and restart the entire DC
8321 * block (including 8051). (Is this too big of a hammer?)
8322 *
8323 * If the 8051 times out a second time, the reset did not bring it
8324 * back to healthy life. In that case, fail any subsequent commands.
8325 */
8326 if (dd->dc8051_timed_out) {
8327 if (dd->dc8051_timed_out > 1) {
8328 dd_dev_err(dd,
8329 "Previous 8051 host command timed out, skipping command %u\n",
8330 type);
8331 return_code = -ENXIO;
8332 goto fail;
8333 }
8334 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8335 dc_shutdown(dd);
8336 dc_start(dd);
8337 spin_lock_irqsave(&dd->dc8051_lock, flags);
8338 }
8339
8340 /*
8341 * If there is no timeout, then the 8051 command interface is
8342 * waiting for a command.
8343 */
8344
8345 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008346	 * When writing an LCB CSR, out_data contains the full value
8347	 * to be written, while in_data contains the relative LCB
8348	 * address in 7:0. Do the work here, rather than the caller,
8349	 * of distributing the write data to where it needs to go:
8350 *
8351 * Write data
8352 * 39:00 -> in_data[47:8]
8353 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8354 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8355 */
8356 if (type == HCMD_WRITE_LCB_CSR) {
8357 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8358 reg = ((((*out_data) >> 40) & 0xff) <<
8359 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8360 | ((((*out_data) >> 48) & 0xffff) <<
8361 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8362 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8363 }
8364
8365 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008366 * Do two writes: the first to stabilize the type and req_data, the
8367 * second to activate.
8368 */
8369 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8370 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8371 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8372 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8373 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8374 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8375 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8376
8377 /* wait for completion, alternate: interrupt */
8378 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8379 while (1) {
8380 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8381 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8382 if (completed)
8383 break;
8384 if (time_after(jiffies, timeout)) {
8385 dd->dc8051_timed_out++;
8386 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8387 if (out_data)
8388 *out_data = 0;
8389 return_code = -ETIMEDOUT;
8390 goto fail;
8391 }
8392 udelay(2);
8393 }
8394
8395 if (out_data) {
8396 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8397 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8398 if (type == HCMD_READ_LCB_CSR) {
8399 /* top 16 bits are in a different register */
8400 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8401 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8402 << (48
8403 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8404 }
8405 }
8406 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8407 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8408 dd->dc8051_timed_out = 0;
8409 /*
8410 * Clear command for next user.
8411 */
8412 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8413
8414fail:
8415 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8416
8417 return return_code;
8418}
8419
8420static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8421{
8422 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8423}
8424
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008425int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8426 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008427{
8428 u64 data;
8429 int ret;
8430
8431 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8432 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8433 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8434 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8435 if (ret != HCMD_SUCCESS) {
8436 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008437 "load 8051 config: field id %d, lane %d, err %d\n",
8438 (int)field_id, (int)lane_id, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008439 }
8440 return ret;
8441}
8442
8443/*
8444 * Read the 8051 firmware "registers". Use the RAM directly. Always
8445 * set the result, even on error.
8446 * Return 0 on success, -errno on failure
8447 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008448int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8449 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008450{
8451 u64 big_data;
8452 u32 addr;
8453 int ret;
8454
8455 /* address start depends on the lane_id */
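	/*
	 * Lanes 0-3 have per-lane blocks that follow the general fields;
	 * any other lane_id (e.g. GENERAL_CONFIG) addresses the general
	 * fields at offset 0.  Each field is 4 bytes wide.
	 */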
8456 if (lane_id < 4)
8457 addr = (4 * NUM_GENERAL_FIELDS)
8458 + (lane_id * 4 * NUM_LANE_FIELDS);
8459 else
8460 addr = 0;
8461 addr += field_id * 4;
8462
8463 /* read is in 8-byte chunks, hardware will truncate the address down */
8464 ret = read_8051_data(dd, addr, 8, &big_data);
8465
8466 if (ret == 0) {
8467 /* extract the 4 bytes we want */
8468 if (addr & 0x4)
8469 *result = (u32)(big_data >> 32);
8470 else
8471 *result = (u32)big_data;
8472 } else {
8473 *result = 0;
8474 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008475 __func__, lane_id, field_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008476 }
8477
8478 return ret;
8479}
8480
8481static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8482 u8 continuous)
8483{
8484 u32 frame;
8485
8486 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8487 | power_management << POWER_MANAGEMENT_SHIFT;
8488 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8489 GENERAL_CONFIG, frame);
8490}
8491
8492static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8493 u16 vl15buf, u8 crc_sizes)
8494{
8495 u32 frame;
8496
8497 frame = (u32)vau << VAU_SHIFT
8498 | (u32)z << Z_SHIFT
8499 | (u32)vcu << VCU_SHIFT
8500 | (u32)vl15buf << VL15BUF_SHIFT
8501 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8502 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8503 GENERAL_CONFIG, frame);
8504}
8505
8506static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8507 u8 *flag_bits, u16 *link_widths)
8508{
8509 u32 frame;
8510
8511 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008512 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008513 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8514 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8515 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8516}
8517
8518static int write_vc_local_link_width(struct hfi1_devdata *dd,
8519 u8 misc_bits,
8520 u8 flag_bits,
8521 u16 link_widths)
8522{
8523 u32 frame;
8524
8525 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8526 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8527 | (u32)link_widths << LINK_WIDTH_SHIFT;
8528 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8529 frame);
8530}
8531
8532static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8533 u8 device_rev)
8534{
8535 u32 frame;
8536
8537 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8538 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8539 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8540}
8541
8542static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8543 u8 *device_rev)
8544{
8545 u32 frame;
8546
8547 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8548 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8549 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8550 & REMOTE_DEVICE_REV_MASK;
8551}
8552
8553void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8554{
8555 u32 frame;
8556
8557 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8558 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8559 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8560}
8561
8562static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8563 u8 *continuous)
8564{
8565 u32 frame;
8566
8567 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8568 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8569 & POWER_MANAGEMENT_MASK;
8570 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8571 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8572}
8573
8574static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8575 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8576{
8577 u32 frame;
8578
8579 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8580 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8581 *z = (frame >> Z_SHIFT) & Z_MASK;
8582 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8583 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8584 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8585}
8586
8587static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8588 u8 *remote_tx_rate,
8589 u16 *link_widths)
8590{
8591 u32 frame;
8592
8593 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008594 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008595 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8596 & REMOTE_TX_RATE_MASK;
8597 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8598}
8599
8600static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8601{
8602 u32 frame;
8603
8604 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8605 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8606}
8607
8608static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8609{
8610 u32 frame;
8611
8612 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8613 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8614}
8615
8616static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8617{
8618 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8619}
8620
8621static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8622{
8623 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8624}
8625
8626void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8627{
8628 u32 frame;
8629 int ret;
8630
8631 *link_quality = 0;
8632 if (dd->pport->host_link_state & HLS_UP) {
8633 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008634 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008635 if (ret == 0)
8636 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8637 & LINK_QUALITY_MASK;
8638 }
8639}
8640
8641static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8642{
8643 u32 frame;
8644
8645 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8646 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8647}
8648
8649static int read_tx_settings(struct hfi1_devdata *dd,
8650 u8 *enable_lane_tx,
8651 u8 *tx_polarity_inversion,
8652 u8 *rx_polarity_inversion,
8653 u8 *max_rate)
8654{
8655 u32 frame;
8656 int ret;
8657
8658 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8659 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8660 & ENABLE_LANE_TX_MASK;
8661 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8662 & TX_POLARITY_INVERSION_MASK;
8663 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8664 & RX_POLARITY_INVERSION_MASK;
8665 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8666 return ret;
8667}
8668
8669static int write_tx_settings(struct hfi1_devdata *dd,
8670 u8 enable_lane_tx,
8671 u8 tx_polarity_inversion,
8672 u8 rx_polarity_inversion,
8673 u8 max_rate)
8674{
8675 u32 frame;
8676
8677 /* no need to mask, all variable sizes match field widths */
8678 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8679 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8680 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8681 | max_rate << MAX_RATE_SHIFT;
8682 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8683}
8684
8685static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8686{
8687 u32 frame, version, prod_id;
8688 int ret, lane;
8689
8690 /* 4 lanes */
8691 for (lane = 0; lane < 4; lane++) {
8692 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8693 if (ret) {
Jubin John17fb4f22016-02-14 20:21:52 -08008694 dd_dev_err(dd,
8695 "Unable to read lane %d firmware details\n",
8696 lane);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008697 continue;
8698 }
8699 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8700 & SPICO_ROM_VERSION_MASK;
8701 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8702 & SPICO_ROM_PROD_ID_MASK;
8703 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008704 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8705 lane, version, prod_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008706 }
8707}
8708
8709/*
8710 * Read an idle LCB message.
8711 *
8712 * Returns 0 on success, -EINVAL on error
8713 */
8714static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8715{
8716 int ret;
8717
Jubin John17fb4f22016-02-14 20:21:52 -08008718 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008719 if (ret != HCMD_SUCCESS) {
8720 dd_dev_err(dd, "read idle message: type %d, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008721 (u32)type, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008722 return -EINVAL;
8723 }
8724 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8725 /* return only the payload as we already know the type */
8726 *data_out >>= IDLE_PAYLOAD_SHIFT;
8727 return 0;
8728}
8729
8730/*
8731 * Read an idle SMA message. To be done in response to a notification from
8732 * the 8051.
8733 *
8734 * Returns 0 on success, -EINVAL on error
8735 */
8736static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8737{
Jubin John17fb4f22016-02-14 20:21:52 -08008738 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8739 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008740}
8741
8742/*
8743 * Send an idle LCB message.
8744 *
8745 * Returns 0 on success, -EINVAL on error
8746 */
8747static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8748{
8749 int ret;
8750
8751 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8752 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8753 if (ret != HCMD_SUCCESS) {
8754 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008755 data, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008756 return -EINVAL;
8757 }
8758 return 0;
8759}
8760
8761/*
8762 * Send an idle SMA message.
8763 *
8764 * Returns 0 on success, -EINVAL on error
8765 */
8766int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8767{
8768 u64 data;
8769
Jubin John17fb4f22016-02-14 20:21:52 -08008770 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8771 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008772 return send_idle_message(dd, data);
8773}
8774
8775/*
8776 * Initialize the LCB then do a quick link up. This may or may not be
8777 * in loopback.
8778 *
8779 * return 0 on success, -errno on error
8780 */
8781static int do_quick_linkup(struct hfi1_devdata *dd)
8782{
8783 u64 reg;
8784 unsigned long timeout;
8785 int ret;
8786
8787 lcb_shutdown(dd, 0);
8788
8789 if (loopback) {
8790 /* LCB_CFG_LOOPBACK.VAL = 2 */
8791 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8792 write_csr(dd, DC_LCB_CFG_LOOPBACK,
Jubin John17fb4f22016-02-14 20:21:52 -08008793 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008794 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8795 }
8796
8797 /* start the LCBs */
8798 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8799 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8800
8801 /* simulator only loopback steps */
8802 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8803 /* LCB_CFG_RUN.EN = 1 */
8804 write_csr(dd, DC_LCB_CFG_RUN,
Jubin John17fb4f22016-02-14 20:21:52 -08008805 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008806
8807 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8808 timeout = jiffies + msecs_to_jiffies(10);
8809 while (1) {
Jubin John17fb4f22016-02-14 20:21:52 -08008810 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008811 if (reg)
8812 break;
8813 if (time_after(jiffies, timeout)) {
8814 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008815 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008816 return -ETIMEDOUT;
8817 }
8818 udelay(2);
8819 }
8820
8821 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
Jubin John17fb4f22016-02-14 20:21:52 -08008822 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008823 }
8824
8825 if (!loopback) {
8826 /*
8827 * When doing quick linkup and not in loopback, both
8828 * sides must be done with LCB set-up before either
8829 * starts the quick linkup. Put a delay here so that
8830 * both sides can be started and have a chance to be
8831 * done with LCB set up before resuming.
8832 */
8833 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008834 "Pausing for peer to be finished with LCB set up\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008835 msleep(5000);
Jubin John17fb4f22016-02-14 20:21:52 -08008836 dd_dev_err(dd, "Continuing with quick linkup\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008837 }
8838
8839 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8840 set_8051_lcb_access(dd);
8841
8842 /*
8843 * State "quick" LinkUp request sets the physical link state to
8844 * LinkUp without a verify capability sequence.
8845 * This state is in simulator v37 and later.
8846 */
8847 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8848 if (ret != HCMD_SUCCESS) {
8849 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008850 "%s: set physical link state to quick LinkUp failed with return %d\n",
8851 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008852
8853 set_host_lcb_access(dd);
8854 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8855
8856 if (ret >= 0)
8857 ret = -EINVAL;
8858 return ret;
8859 }
8860
8861 return 0; /* success */
8862}
8863
8864/*
8865 * Set the SerDes to internal loopback mode.
8866 * Returns 0 on success, -errno on error.
8867 */
8868static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8869{
8870 int ret;
8871
8872 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8873 if (ret == HCMD_SUCCESS)
8874 return 0;
8875 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008876 "Set physical link state to SerDes Loopback failed with return %d\n",
8877 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008878 if (ret >= 0)
8879 ret = -EINVAL;
8880 return ret;
8881}
8882
8883/*
8884 * Do all special steps to set up loopback.
8885 */
8886static int init_loopback(struct hfi1_devdata *dd)
8887{
8888 dd_dev_info(dd, "Entering loopback mode\n");
8889
8890 /* all loopbacks should disable self GUID check */
8891 write_csr(dd, DC_DC8051_CFG_MODE,
Jubin John17fb4f22016-02-14 20:21:52 -08008892 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008893
8894 /*
8895 * The simulator has only one loopback option - LCB. Switch
8896 * to that option, which includes quick link up.
8897 *
8898 * Accept all valid loopback values.
8899 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08008900 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
8901 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
8902 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008903 loopback = LOOPBACK_LCB;
8904 quick_linkup = 1;
8905 return 0;
8906 }
8907
8908 /* handle serdes loopback */
8909 if (loopback == LOOPBACK_SERDES) {
8910	/* internal serdes loopback needs quick linkup on RTL */
8911 if (dd->icode == ICODE_RTL_SILICON)
8912 quick_linkup = 1;
8913 return set_serdes_loopback_mode(dd);
8914 }
8915
8916 /* LCB loopback - handled at poll time */
8917 if (loopback == LOOPBACK_LCB) {
8918 quick_linkup = 1; /* LCB is always quick linkup */
8919
8920 /* not supported in emulation due to emulation RTL changes */
8921 if (dd->icode == ICODE_FPGA_EMULATION) {
8922 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008923 "LCB loopback not supported in emulation\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008924 return -EINVAL;
8925 }
8926 return 0;
8927 }
8928
8929 /* external cable loopback requires no extra steps */
8930 if (loopback == LOOPBACK_CABLE)
8931 return 0;
8932
8933 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8934 return -EINVAL;
8935}
8936
8937/*
8938 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8939 * used in the Verify Capability link width attribute.
8940 */
8941static u16 opa_to_vc_link_widths(u16 opa_widths)
8942{
8943 int i;
8944 u16 result = 0;
8945
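	/* each OPA_LINK_WIDTH_nX flag maps to bit (n - 1) of the VC field */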
8946 static const struct link_bits {
8947 u16 from;
8948 u16 to;
8949 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08008950 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
8951 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
8952 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
8953 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04008954 };
8955
8956 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8957 if (opa_widths & opa_link_xlate[i].from)
8958 result |= opa_link_xlate[i].to;
8959 }
8960 return result;
8961}
8962
8963/*
8964 * Set link attributes before moving to polling.
8965 */
8966static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8967{
8968 struct hfi1_devdata *dd = ppd->dd;
8969 u8 enable_lane_tx;
8970 u8 tx_polarity_inversion;
8971 u8 rx_polarity_inversion;
8972 int ret;
8973
8974 /* reset our fabric serdes to clear any lingering problems */
8975 fabric_serdes_reset(dd);
8976
8977 /* set the local tx rate - need to read-modify-write */
8978 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08008979 &rx_polarity_inversion, &ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008980 if (ret)
8981 goto set_local_link_attributes_fail;
8982
8983 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8984 /* set the tx rate to the fastest enabled */
8985 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8986 ppd->local_tx_rate = 1;
8987 else
8988 ppd->local_tx_rate = 0;
8989 } else {
8990 /* set the tx rate to all enabled */
8991 ppd->local_tx_rate = 0;
8992 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8993 ppd->local_tx_rate |= 2;
8994 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8995 ppd->local_tx_rate |= 1;
8996 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04008997
8998 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008999 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009000 rx_polarity_inversion, ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009001 if (ret != HCMD_SUCCESS)
9002 goto set_local_link_attributes_fail;
9003
9004 /*
9005 * DC supports continuous updates.
9006 */
Jubin John17fb4f22016-02-14 20:21:52 -08009007 ret = write_vc_local_phy(dd,
9008 0 /* no power management */,
9009 1 /* continuous updates */);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009010 if (ret != HCMD_SUCCESS)
9011 goto set_local_link_attributes_fail;
9012
9013 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9014 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9015 ppd->port_crc_mode_enabled);
9016 if (ret != HCMD_SUCCESS)
9017 goto set_local_link_attributes_fail;
9018
9019 ret = write_vc_local_link_width(dd, 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009020 opa_to_vc_link_widths(
9021 ppd->link_width_enabled));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009022 if (ret != HCMD_SUCCESS)
9023 goto set_local_link_attributes_fail;
9024
9025 /* let peer know who we are */
9026 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9027 if (ret == HCMD_SUCCESS)
9028 return 0;
9029
9030set_local_link_attributes_fail:
9031 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009032 "Failed to set local link attributes, return 0x%x\n",
9033 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009034 return ret;
9035}
9036
9037/*
9038 * Call this to start the link. Schedule a retry if the cable is not
9039 * present or if unable to start polling. Do not do anything if the
9040 * link is disabled. Returns 0 if link is disabled or moved to polling
9041 */
9042int start_link(struct hfi1_pportdata *ppd)
9043{
9044 if (!ppd->link_enabled) {
9045 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009046 "%s: stopping link start because link is disabled\n",
9047 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009048 return 0;
9049 }
9050 if (!ppd->driver_link_ready) {
9051 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009052 "%s: stopping link start because driver is not ready\n",
9053 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009054 return 0;
9055 }
9056
9057 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
Jubin John17fb4f22016-02-14 20:21:52 -08009058 loopback == LOOPBACK_LCB ||
9059 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009060 return set_link_state(ppd, HLS_DN_POLL);
9061
9062 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009063 "%s: stopping link start because no cable is present\n",
9064 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009065 return -EAGAIN;
9066}
9067
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009068static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9069{
9070 struct hfi1_devdata *dd = ppd->dd;
9071 u64 mask;
9072 unsigned long timeout;
9073
9074 /*
9075 * Check for QSFP interrupt for t_init (SFF 8679)
9076 */
9077 timeout = jiffies + msecs_to_jiffies(2000);
9078 while (1) {
9079 mask = read_csr(dd, dd->hfi1_id ?
9080 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9081 if (!(mask & QSFP_HFI0_INT_N)) {
9082 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9083 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9084 break;
9085 }
9086 if (time_after(jiffies, timeout)) {
9087 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9088 __func__);
9089 break;
9090 }
9091 udelay(2);
9092 }
9093}
9094
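/* enable or disable the QSFP INT_N interrupt via the ASIC QSFP mask CSR */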
9095static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9096{
9097 struct hfi1_devdata *dd = ppd->dd;
9098 u64 mask;
9099
9100 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9101 if (enable)
9102 mask |= (u64)QSFP_HFI0_INT_N;
9103 else
9104 mask &= ~(u64)QSFP_HFI0_INT_N;
9105 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9106}
9107
9108void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009109{
9110 struct hfi1_devdata *dd = ppd->dd;
9111 u64 mask, qsfp_mask;
9112
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009113 /* Disable INT_N from triggering QSFP interrupts */
9114 set_qsfp_int_n(ppd, 0);
9115
9116 /* Reset the QSFP */
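	/*
	 * Drive RESET_N low: enable the pin in the OE register, clear it
	 * in the OUT register, wait ~10us, then set it again to release
	 * the reset.
	 */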
Mike Marciniszyn77241052015-07-30 15:17:43 -04009117 mask = (u64)QSFP_HFI0_RESET_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009118 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009119 qsfp_mask |= mask;
Jubin John17fb4f22016-02-14 20:21:52 -08009120 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009121
Jubin John17fb4f22016-02-14 20:21:52 -08009122 qsfp_mask = read_csr(dd,
9123 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009124 qsfp_mask &= ~mask;
9125 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009126 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009127
9128 udelay(10);
9129
9130 qsfp_mask |= mask;
9131 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009132 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009133
9134 wait_for_qsfp_init(ppd);
9135
9136 /*
9137 * Allow INT_N to trigger the QSFP interrupt to watch
9138 * for alarms and warnings
9139 */
9140 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009141}
9142
9143static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9144 u8 *qsfp_interrupt_status)
9145{
9146 struct hfi1_devdata *dd = ppd->dd;
9147
9148 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009149 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9150 dd_dev_info(dd, "%s: QSFP cable on fire\n",
9151 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009152
9153 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009154 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9155 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9156 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009157
9158 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009159 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9160 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9161 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009162
9163 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009164 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9165 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9166 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009167
9168 /* Byte 2 is vendor specific */
9169
9170 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009171 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9172 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9173 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009174
9175 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009176 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9177 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9178 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009179
9180 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009181 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9182 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9183 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009184
9185 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009186 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9187 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9188 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009189
9190 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009191 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9192 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9193 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009194
9195 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009196 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9197 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9198 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009199
9200 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009201 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9202 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9203 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009204
9205 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009206 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9207 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9208 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009209
9210 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009211 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9212 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9213 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009214
9215 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009216 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9217 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9218 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009219
9220 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009221 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9222 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9223 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009224
9225 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009226 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9227 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9228 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009229
9230 /* Bytes 9-10 and 11-12 are reserved */
9231 /* Bytes 13-15 are vendor specific */
9232
9233 return 0;
9234}
9235
Mike Marciniszyn77241052015-07-30 15:17:43 -04009236/* This routine will only be scheduled if the QSFP module is present */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009237void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009238{
9239 struct qsfp_data *qd;
9240 struct hfi1_pportdata *ppd;
9241 struct hfi1_devdata *dd;
9242
9243 qd = container_of(work, struct qsfp_data, qsfp_work);
9244 ppd = qd->ppd;
9245 dd = ppd->dd;
9246
9247 /* Sanity check */
9248 if (!qsfp_mod_present(ppd))
9249 return;
9250
9251 /*
 9252 	 * Turn DC back on after the cable has been
9253 * re-inserted. Up until now, the DC has been in
9254 * reset to save power.
9255 */
9256 dc_start(dd);
9257
9258 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009259 set_qsfp_int_n(ppd, 0);
9260
9261 wait_for_qsfp_init(ppd);
9262
9263 /*
9264 * Allow INT_N to trigger the QSFP interrupt to watch
9265 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009266 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009267 set_qsfp_int_n(ppd, 1);
9268
9269 tune_serdes(ppd);
9270
9271 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009272 }
9273
9274 if (qd->check_interrupt_flags) {
9275 u8 qsfp_interrupt_status[16] = {0,};
9276
9277 if (qsfp_read(ppd, dd->hfi1_id, 6,
9278 &qsfp_interrupt_status[0], 16) != 16) {
9279 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009280 "%s: Failed to read status of QSFP module\n",
9281 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009282 } else {
9283 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009284
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009285 handle_qsfp_error_conditions(
9286 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009287 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9288 ppd->qsfp_info.check_interrupt_flags = 0;
9289 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009290 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009291 }
9292 }
9293}
9294
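/*
 * One-time QSFP interrupt setup: route the QSFP interrupt to the HFI
 * that owns the module, clear any stale status, enable MODPRST_N so
 * module insertion/removal is seen, and account for the active-low
 * nature of the INT_N and MODPRST_N pins.
 */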
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009295static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009296{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009297 struct hfi1_pportdata *ppd = dd->pport;
9298 u64 qsfp_mask, cce_int_mask;
9299 const int qsfp1_int_smask = QSFP1_INT % 64;
9300 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009301
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009302 /*
9303 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9304 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9305 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9306 * the index of the appropriate CSR in the CCEIntMask CSR array
9307 */
9308 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9309 (8 * (QSFP1_INT / 64)));
9310 if (dd->hfi1_id) {
9311 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9312 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9313 cce_int_mask);
9314 } else {
9315 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9316 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9317 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009318 }
9319
Mike Marciniszyn77241052015-07-30 15:17:43 -04009320 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9321 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009322 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9323 qsfp_mask);
9324 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9325 qsfp_mask);
9326
9327 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009328
9329 /* Handle active low nature of INT_N and MODPRST_N pins */
9330 if (qsfp_mod_present(ppd))
9331 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9332 write_csr(dd,
9333 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9334 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009335}
9336
Dean Luickbbdeb332015-12-01 15:38:15 -05009337/*
9338 * Do a one-time initialize of the LCB block.
9339 */
9340static void init_lcb(struct hfi1_devdata *dd)
9341{
Dean Luicka59329d2016-02-03 14:32:31 -08009342 /* simulator does not correctly handle LCB cclk loopback, skip */
9343 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9344 return;
9345
Dean Luickbbdeb332015-12-01 15:38:15 -05009346 /* the DC has been reset earlier in the driver load */
9347
9348 /* set LCB for cclk loopback on the port */
9349 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9350 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9351 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9352 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9353 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9354 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9355 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9356}
9357
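/*
 * Bring up the SerDes and start the link: derive a port GUID if one
 * is not already set, do the one-time LCB init, set up loopback if
 * requested, tune the SerDes, and then start the link.
 */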
Mike Marciniszyn77241052015-07-30 15:17:43 -04009358int bringup_serdes(struct hfi1_pportdata *ppd)
9359{
9360 struct hfi1_devdata *dd = ppd->dd;
9361 u64 guid;
9362 int ret;
9363
9364 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9365 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9366
9367 guid = ppd->guid;
9368 if (!guid) {
9369 if (dd->base_guid)
9370 guid = dd->base_guid + ppd->port - 1;
9371 ppd->guid = guid;
9372 }
9373
Mike Marciniszyn77241052015-07-30 15:17:43 -04009374 /* Set linkinit_reason on power up per OPA spec */
9375 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9376
Dean Luickbbdeb332015-12-01 15:38:15 -05009377 /* one-time init of the LCB */
9378 init_lcb(dd);
9379
Mike Marciniszyn77241052015-07-30 15:17:43 -04009380 if (loopback) {
9381 ret = init_loopback(dd);
9382 if (ret < 0)
9383 return ret;
9384 }
9385
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009386 /* tune the SERDES to a ballpark setting for
 9387 	 * optimal signal and bit error rate.
 9388 	 * This needs to be done before starting the link.
9389 */
9390 tune_serdes(ppd);
9391
Mike Marciniszyn77241052015-07-30 15:17:43 -04009392 return start_link(ppd);
9393}
9394
9395void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9396{
9397 struct hfi1_devdata *dd = ppd->dd;
9398
9399 /*
 9400 	 * Shut down the link and keep it down. First, clear the flag that
 9401 	 * indicates the driver wants the link up (driver_link_ready).
 9402 	 * Then make sure the link is not automatically restarted
 9403 	 * (link_enabled). Cancel any pending restart, and finally
9404 * go offline.
9405 */
9406 ppd->driver_link_ready = 0;
9407 ppd->link_enabled = 0;
9408
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009409 ppd->offline_disabled_reason =
9410 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009411 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009412 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009413 set_link_state(ppd, HLS_DN_OFFLINE);
9414
9415 /* disable the port */
9416 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9417}
9418
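/*
 * Allocate the per-CPU counters used for RC ACK statistics on each
 * port. Returns -ENOMEM if any allocation fails.
 */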
9419static inline int init_cpu_counters(struct hfi1_devdata *dd)
9420{
9421 struct hfi1_pportdata *ppd;
9422 int i;
9423
9424 ppd = (struct hfi1_pportdata *)(dd + 1);
9425 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009426 ppd->ibport_data.rvp.rc_acks = NULL;
9427 ppd->ibport_data.rvp.rc_qacks = NULL;
9428 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9429 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9430 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9431 if (!ppd->ibport_data.rvp.rc_acks ||
9432 !ppd->ibport_data.rvp.rc_delayed_comp ||
9433 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009434 return -ENOMEM;
9435 }
9436
9437 return 0;
9438}
9439
9440static const char * const pt_names[] = {
9441 "expected",
9442 "eager",
9443 "invalid"
9444};
9445
9446static const char *pt_name(u32 type)
9447{
9448 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9449}
9450
9451/*
9452 * index is the index into the receive array
9453 */
9454void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9455 u32 type, unsigned long pa, u16 order)
9456{
9457 u64 reg;
9458 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9459 (dd->kregbase + RCV_ARRAY));
9460
9461 if (!(dd->flags & HFI1_PRESENT))
9462 goto done;
9463
9464 if (type == PT_INVALID) {
9465 pa = 0;
9466 } else if (type > PT_INVALID) {
9467 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009468 "unexpected receive array type %u for index %u, not handled\n",
9469 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009470 goto done;
9471 }
9472
9473 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9474 pt_name(type), index, pa, (unsigned long)order);
9475
9476#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9477 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9478 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9479 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9480 << RCV_ARRAY_RT_ADDR_SHIFT;
9481 writeq(reg, base + (index * 8));
9482
9483 if (type == PT_EAGER)
9484 /*
9485 * Eager entries are written one-by-one so we have to push them
9486 * after we write the entry.
9487 */
9488 flush_wc();
9489done:
9490 return;
9491}
9492
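/*
 * Invalidate every eager and expected receive array entry owned by
 * this context.
 */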
9493void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9494{
9495 struct hfi1_devdata *dd = rcd->dd;
9496 u32 i;
9497
9498 /* this could be optimized */
9499 for (i = rcd->eager_base; i < rcd->eager_base +
9500 rcd->egrbufs.alloced; i++)
9501 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9502
9503 for (i = rcd->expected_base;
9504 i < rcd->expected_base + rcd->expected_count; i++)
9505 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9506}
9507
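/* Fill in the context info runtime flags from the capability masks. */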
9508int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9509 struct hfi1_ctxt_info *kinfo)
9510{
9511 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9512 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9513 return 0;
9514}
9515
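/*
 * Return a pointer to the packet header for the given RHF address,
 * using the header offset recorded in the RHF itself.
 */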
9516struct hfi1_message_header *hfi1_get_msgheader(
9517 struct hfi1_devdata *dd, __le32 *rhf_addr)
9518{
9519 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9520
9521 return (struct hfi1_message_header *)
9522 (rhf_addr - dd->rhf_offset + offset);
9523}
9524
9525static const char * const ib_cfg_name_strings[] = {
9526 "HFI1_IB_CFG_LIDLMC",
9527 "HFI1_IB_CFG_LWID_DG_ENB",
9528 "HFI1_IB_CFG_LWID_ENB",
9529 "HFI1_IB_CFG_LWID",
9530 "HFI1_IB_CFG_SPD_ENB",
9531 "HFI1_IB_CFG_SPD",
9532 "HFI1_IB_CFG_RXPOL_ENB",
9533 "HFI1_IB_CFG_LREV_ENB",
9534 "HFI1_IB_CFG_LINKLATENCY",
9535 "HFI1_IB_CFG_HRTBT",
9536 "HFI1_IB_CFG_OP_VLS",
9537 "HFI1_IB_CFG_VL_HIGH_CAP",
9538 "HFI1_IB_CFG_VL_LOW_CAP",
9539 "HFI1_IB_CFG_OVERRUN_THRESH",
9540 "HFI1_IB_CFG_PHYERR_THRESH",
9541 "HFI1_IB_CFG_LINKDEFAULT",
9542 "HFI1_IB_CFG_PKEYS",
9543 "HFI1_IB_CFG_MTU",
9544 "HFI1_IB_CFG_LSTATE",
9545 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9546 "HFI1_IB_CFG_PMA_TICKS",
9547 "HFI1_IB_CFG_PORT"
9548};
9549
9550static const char *ib_cfg_name(int which)
9551{
9552 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9553 return "invalid";
9554 return ib_cfg_name_strings[which];
9555}
9556
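/*
 * Return the current value of one IB config item (HFI1_IB_CFG_*) for
 * the port; unimplemented items return 0 and are optionally logged.
 */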
9557int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9558{
9559 struct hfi1_devdata *dd = ppd->dd;
9560 int val = 0;
9561
9562 switch (which) {
9563 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9564 val = ppd->link_width_enabled;
9565 break;
9566 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9567 val = ppd->link_width_active;
9568 break;
9569 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9570 val = ppd->link_speed_enabled;
9571 break;
9572 case HFI1_IB_CFG_SPD: /* current Link speed */
9573 val = ppd->link_speed_active;
9574 break;
9575
9576 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9577 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9578 case HFI1_IB_CFG_LINKLATENCY:
9579 goto unimplemented;
9580
9581 case HFI1_IB_CFG_OP_VLS:
9582 val = ppd->vls_operational;
9583 break;
9584 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9585 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9586 break;
9587 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9588 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9589 break;
9590 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9591 val = ppd->overrun_threshold;
9592 break;
9593 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9594 val = ppd->phy_error_threshold;
9595 break;
9596 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9597 val = dd->link_default;
9598 break;
9599
9600 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9601 case HFI1_IB_CFG_PMA_TICKS:
9602 default:
9603unimplemented:
9604 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9605 dd_dev_info(
9606 dd,
9607 "%s: which %s: not implemented\n",
9608 __func__,
9609 ib_cfg_name(which));
9610 break;
9611 }
9612
9613 return val;
9614}
9615
9616/*
9617 * The largest MAD packet size.
9618 */
9619#define MAX_MAD_PACKET 2048
9620
9621/*
9622 * Return the maximum header bytes that can go on the _wire_
9623 * for this device. This count includes the ICRC which is
 9624  * not part of the packet held in memory but is appended
9625 * by the HW.
9626 * This is dependent on the device's receive header entry size.
9627 * HFI allows this to be set per-receive context, but the
9628 * driver presently enforces a global value.
9629 */
9630u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9631{
9632 /*
9633 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9634 * the Receive Header Entry Size minus the PBC (or RHF) size
9635 * plus one DW for the ICRC appended by HW.
9636 *
9637 * dd->rcd[0].rcvhdrqentsize is in DW.
 9638 	 * We use rcd[0] as all contexts will have the same value. Also,
9639 * the first kernel context would have been allocated by now so
9640 * we are guaranteed a valid value.
9641 */
9642 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9643}
9644
9645/*
9646 * Set Send Length
9647 * @ppd - per port data
9648 *
9649 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9650 * registers compare against LRH.PktLen, so use the max bytes included
9651 * in the LRH.
9652 *
9653 * This routine changes all VL values except VL15, which it maintains at
9654 * the same value.
9655 */
9656static void set_send_length(struct hfi1_pportdata *ppd)
9657{
9658 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009659 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9660 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009661 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9662 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9663 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9664 int i;
9665
9666 for (i = 0; i < ppd->vls_supported; i++) {
9667 if (dd->vld[i].mtu > maxvlmtu)
9668 maxvlmtu = dd->vld[i].mtu;
9669 if (i <= 3)
9670 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9671 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9672 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9673 else
9674 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9675 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9676 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9677 }
9678 write_csr(dd, SEND_LEN_CHECK0, len1);
9679 write_csr(dd, SEND_LEN_CHECK1, len2);
9680 /* adjust kernel credit return thresholds based on new MTUs */
9681 /* all kernel receive contexts have the same hdrqentsize */
9682 for (i = 0; i < ppd->vls_supported; i++) {
9683 sc_set_cr_threshold(dd->vld[i].sc,
Jubin John17fb4f22016-02-14 20:21:52 -08009684 sc_mtu_to_threshold(dd->vld[i].sc,
9685 dd->vld[i].mtu,
9686 dd->rcd[0]->
9687 rcvhdrqentsize));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009688 }
9689 sc_set_cr_threshold(dd->vld[15].sc,
Jubin John17fb4f22016-02-14 20:21:52 -08009690 sc_mtu_to_threshold(dd->vld[15].sc,
9691 dd->vld[15].mtu,
9692 dd->rcd[0]->rcvhdrqentsize));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009693
9694 /* Adjust maximum MTU for the port in DC */
9695 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9696 (ilog2(maxvlmtu >> 8) + 1);
9697 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9698 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9699 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9700 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9701 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9702}
9703
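/*
 * Program the port LID/LMC into the DC receive check registers and
 * update the SLID check of every send context and SDMA engine to
 * match.
 */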
9704static void set_lidlmc(struct hfi1_pportdata *ppd)
9705{
9706 int i;
9707 u64 sreg = 0;
9708 struct hfi1_devdata *dd = ppd->dd;
9709 u32 mask = ~((1U << ppd->lmc) - 1);
9710 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9711
9712 if (dd->hfi1_snoop.mode_flag)
9713 dd_dev_info(dd, "Set lid/lmc while snooping");
9714
9715 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9716 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9717 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -08009718 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -04009719 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9720 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9721 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9722
9723 /*
9724 * Iterate over all the send contexts and set their SLID check
9725 */
9726 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9727 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9728 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9729 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9730
9731 for (i = 0; i < dd->chip_send_contexts; i++) {
9732 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9733 i, (u32)sreg);
9734 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9735 }
9736
9737 /* Now we have to do the same thing for the sdma engines */
9738 sdma_update_lmc(dd, mask, ppd->lid);
9739}
9740
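/*
 * Poll the physical link state until it equals @state or @msecs
 * milliseconds elapse. Returns 0 on success, -ETIMEDOUT on timeout.
 */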
9741static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9742{
9743 unsigned long timeout;
9744 u32 curr_state;
9745
9746 timeout = jiffies + msecs_to_jiffies(msecs);
9747 while (1) {
9748 curr_state = read_physical_state(dd);
9749 if (curr_state == state)
9750 break;
9751 if (time_after(jiffies, timeout)) {
9752 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009753 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9754 state, curr_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009755 return -ETIMEDOUT;
9756 }
9757 usleep_range(1950, 2050); /* sleep 2ms-ish */
9758 }
9759
9760 return 0;
9761}
9762
9763/*
9764 * Helper for set_link_state(). Do not call except from that routine.
9765 * Expects ppd->hls_mutex to be held.
9766 *
9767 * @rem_reason value to be sent to the neighbor
9768 *
9769 * LinkDownReasons only set if transition succeeds.
9770 */
9771static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9772{
9773 struct hfi1_devdata *dd = ppd->dd;
9774 u32 pstate, previous_state;
9775 u32 last_local_state;
9776 u32 last_remote_state;
9777 int ret;
9778 int do_transition;
9779 int do_wait;
9780
9781 previous_state = ppd->host_link_state;
9782 ppd->host_link_state = HLS_GOING_OFFLINE;
9783 pstate = read_physical_state(dd);
9784 if (pstate == PLS_OFFLINE) {
9785 do_transition = 0; /* in right state */
9786 do_wait = 0; /* ...no need to wait */
9787 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9788 do_transition = 0; /* in an offline transient state */
9789 do_wait = 1; /* ...wait for it to settle */
9790 } else {
9791 do_transition = 1; /* need to move to offline */
9792 do_wait = 1; /* ...will need to wait */
9793 }
9794
9795 if (do_transition) {
9796 ret = set_physical_link_state(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009797 PLS_OFFLINE | (rem_reason << 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009798
9799 if (ret != HCMD_SUCCESS) {
9800 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009801 "Failed to transition to Offline link state, return %d\n",
9802 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009803 return -EINVAL;
9804 }
Bryan Morgana9c05e32016-02-03 14:30:49 -08009805 if (ppd->offline_disabled_reason ==
9806 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -04009807 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -08009808 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009809 }
9810
9811 if (do_wait) {
9812 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -04009813 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009814 if (ret < 0)
9815 return ret;
9816 }
9817
9818 /* make sure the logical state is also down */
9819 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9820
9821 /*
9822 * Now in charge of LCB - must be after the physical state is
9823 * offline.quiet and before host_link_state is changed.
9824 */
9825 set_host_lcb_access(dd);
9826 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9827 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9828
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009829 if (ppd->port_type == PORT_TYPE_QSFP &&
9830 ppd->qsfp_info.limiting_active &&
9831 qsfp_mod_present(ppd)) {
9832 set_qsfp_tx(ppd, 0);
9833 }
9834
Mike Marciniszyn77241052015-07-30 15:17:43 -04009835 /*
9836 * The LNI has a mandatory wait time after the physical state
9837 * moves to Offline.Quiet. The wait time may be different
9838 * depending on how the link went down. The 8051 firmware
9839 * will observe the needed wait time and only move to ready
9840 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -05009841 * is 6s, so wait that long and then at least 0.5s more for
9842 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009843 */
Dean Luick05087f3b2015-12-01 15:38:16 -05009844 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009845 if (ret) {
9846 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009847 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009848 /* state is really offline, so make it so */
9849 ppd->host_link_state = HLS_DN_OFFLINE;
9850 return ret;
9851 }
9852
9853 /*
9854 * The state is now offline and the 8051 is ready to accept host
9855 * requests.
9856 * - change our state
9857 * - notify others if we were previously in a linkup state
9858 */
9859 ppd->host_link_state = HLS_DN_OFFLINE;
9860 if (previous_state & HLS_UP) {
9861 /* went down while link was up */
9862 handle_linkup_change(dd, 0);
9863 } else if (previous_state
9864 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9865 /* went down while attempting link up */
9866 /* byte 1 of last_*_state is the failure reason */
9867 read_last_local_state(dd, &last_local_state);
9868 read_last_remote_state(dd, &last_remote_state);
9869 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009870 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9871 last_local_state, last_remote_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009872 }
9873
9874 /* the active link width (downgrade) is 0 on link down */
9875 ppd->link_width_active = 0;
9876 ppd->link_width_downgrade_tx_active = 0;
9877 ppd->link_width_downgrade_rx_active = 0;
9878 ppd->current_egress_rate = 0;
9879 return 0;
9880}
9881
9882/* return the link state name */
9883static const char *link_state_name(u32 state)
9884{
9885 const char *name;
9886 int n = ilog2(state);
9887 static const char * const names[] = {
9888 [__HLS_UP_INIT_BP] = "INIT",
9889 [__HLS_UP_ARMED_BP] = "ARMED",
9890 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9891 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9892 [__HLS_DN_POLL_BP] = "POLL",
9893 [__HLS_DN_DISABLE_BP] = "DISABLE",
9894 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9895 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9896 [__HLS_GOING_UP_BP] = "GOING_UP",
9897 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9898 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9899 };
9900
9901 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9902 return name ? name : "unknown";
9903}
9904
9905/* return the link state reason name */
9906static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9907{
9908 if (state == HLS_UP_INIT) {
9909 switch (ppd->linkinit_reason) {
9910 case OPA_LINKINIT_REASON_LINKUP:
9911 return "(LINKUP)";
9912 case OPA_LINKINIT_REASON_FLAPPING:
9913 return "(FLAPPING)";
9914 case OPA_LINKINIT_OUTSIDE_POLICY:
9915 return "(OUTSIDE_POLICY)";
9916 case OPA_LINKINIT_QUARANTINED:
9917 return "(QUARANTINED)";
9918 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9919 return "(INSUFIC_CAPABILITY)";
9920 default:
9921 break;
9922 }
9923 }
9924 return "";
9925}
9926
9927/*
9928 * driver_physical_state - convert the driver's notion of a port's
9929 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9930 * Return -1 (converted to a u32) to indicate error.
9931 */
9932u32 driver_physical_state(struct hfi1_pportdata *ppd)
9933{
9934 switch (ppd->host_link_state) {
9935 case HLS_UP_INIT:
9936 case HLS_UP_ARMED:
9937 case HLS_UP_ACTIVE:
9938 return IB_PORTPHYSSTATE_LINKUP;
9939 case HLS_DN_POLL:
9940 return IB_PORTPHYSSTATE_POLLING;
9941 case HLS_DN_DISABLE:
9942 return IB_PORTPHYSSTATE_DISABLED;
9943 case HLS_DN_OFFLINE:
9944 return OPA_PORTPHYSSTATE_OFFLINE;
9945 case HLS_VERIFY_CAP:
9946 return IB_PORTPHYSSTATE_POLLING;
9947 case HLS_GOING_UP:
9948 return IB_PORTPHYSSTATE_POLLING;
9949 case HLS_GOING_OFFLINE:
9950 return OPA_PORTPHYSSTATE_OFFLINE;
9951 case HLS_LINK_COOLDOWN:
9952 return OPA_PORTPHYSSTATE_OFFLINE;
9953 case HLS_DN_DOWNDEF:
9954 default:
9955 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9956 ppd->host_link_state);
9957 return -1;
9958 }
9959}
9960
9961/*
9962 * driver_logical_state - convert the driver's notion of a port's
9963 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9964 * (converted to a u32) to indicate error.
9965 */
9966u32 driver_logical_state(struct hfi1_pportdata *ppd)
9967{
9968 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9969 return IB_PORT_DOWN;
9970
9971 switch (ppd->host_link_state & HLS_UP) {
9972 case HLS_UP_INIT:
9973 return IB_PORT_INIT;
9974 case HLS_UP_ARMED:
9975 return IB_PORT_ARMED;
9976 case HLS_UP_ACTIVE:
9977 return IB_PORT_ACTIVE;
9978 default:
9979 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9980 ppd->host_link_state);
9981 return -1;
9982 }
9983}
9984
9985void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9986 u8 neigh_reason, u8 rem_reason)
9987{
9988 if (ppd->local_link_down_reason.latest == 0 &&
9989 ppd->neigh_link_down_reason.latest == 0) {
9990 ppd->local_link_down_reason.latest = lcl_reason;
9991 ppd->neigh_link_down_reason.latest = neigh_reason;
9992 ppd->remote_link_down_reason = rem_reason;
9993 }
9994}
9995
9996/*
9997 * Change the physical and/or logical link state.
9998 *
9999 * Do not call this routine while inside an interrupt. It contains
10000 * calls to routines that can take multiple seconds to finish.
10001 *
10002 * Returns 0 on success, -errno on failure.
10003 */
10004int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10005{
10006 struct hfi1_devdata *dd = ppd->dd;
10007 struct ib_event event = {.device = NULL};
10008 int ret1, ret = 0;
10009 int was_up, is_down;
10010 int orig_new_state, poll_bounce;
10011
10012 mutex_lock(&ppd->hls_lock);
10013
10014 orig_new_state = state;
10015 if (state == HLS_DN_DOWNDEF)
10016 state = dd->link_default;
10017
10018 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010019 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10020 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010021
10022 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010023 link_state_name(ppd->host_link_state),
10024 link_state_name(orig_new_state),
10025 poll_bounce ? "(bounce) " : "",
10026 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010027
10028 was_up = !!(ppd->host_link_state & HLS_UP);
10029
10030 /*
10031 * If we're going to a (HLS_*) link state that implies the logical
10032 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10033 * reset is_sm_config_started to 0.
10034 */
10035 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10036 ppd->is_sm_config_started = 0;
10037
10038 /*
10039 * Do nothing if the states match. Let a poll to poll link bounce
10040 * go through.
10041 */
10042 if (ppd->host_link_state == state && !poll_bounce)
10043 goto done;
10044
10045 switch (state) {
10046 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010047 if (ppd->host_link_state == HLS_DN_POLL &&
10048 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010049 /*
10050 * Quick link up jumps from polling to here.
10051 *
10052 * Whether in normal or loopback mode, the
10053 * simulator jumps from polling to link up.
10054 * Accept that here.
10055 */
Jubin John17fb4f22016-02-14 20:21:52 -080010056 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010057 } else if (ppd->host_link_state != HLS_GOING_UP) {
10058 goto unexpected;
10059 }
10060
10061 ppd->host_link_state = HLS_UP_INIT;
10062 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10063 if (ret) {
10064 /* logical state didn't change, stay at going_up */
10065 ppd->host_link_state = HLS_GOING_UP;
10066 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010067 "%s: logical state did not change to INIT\n",
10068 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010069 } else {
10070 /* clear old transient LINKINIT_REASON code */
10071 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10072 ppd->linkinit_reason =
10073 OPA_LINKINIT_REASON_LINKUP;
10074
10075 /* enable the port */
10076 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10077
10078 handle_linkup_change(dd, 1);
10079 }
10080 break;
10081 case HLS_UP_ARMED:
10082 if (ppd->host_link_state != HLS_UP_INIT)
10083 goto unexpected;
10084
10085 ppd->host_link_state = HLS_UP_ARMED;
10086 set_logical_state(dd, LSTATE_ARMED);
10087 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10088 if (ret) {
10089 /* logical state didn't change, stay at init */
10090 ppd->host_link_state = HLS_UP_INIT;
10091 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010092 "%s: logical state did not change to ARMED\n",
10093 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010094 }
10095 /*
10096 * The simulator does not currently implement SMA messages,
10097 * so neighbor_normal is not set. Set it here when we first
10098 * move to Armed.
10099 */
10100 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10101 ppd->neighbor_normal = 1;
10102 break;
10103 case HLS_UP_ACTIVE:
10104 if (ppd->host_link_state != HLS_UP_ARMED)
10105 goto unexpected;
10106
10107 ppd->host_link_state = HLS_UP_ACTIVE;
10108 set_logical_state(dd, LSTATE_ACTIVE);
10109 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10110 if (ret) {
10111 /* logical state didn't change, stay at armed */
10112 ppd->host_link_state = HLS_UP_ARMED;
10113 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010114 "%s: logical state did not change to ACTIVE\n",
10115 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010116 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010117 /* tell all engines to go running */
10118 sdma_all_running(dd);
10119
 10120 			/* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010121 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010122 event.element.port_num = ppd->port;
10123 event.event = IB_EVENT_PORT_ACTIVE;
10124 }
10125 break;
10126 case HLS_DN_POLL:
10127 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10128 ppd->host_link_state == HLS_DN_OFFLINE) &&
10129 dd->dc_shutdown)
10130 dc_start(dd);
10131 /* Hand LED control to the DC */
10132 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10133
10134 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10135 u8 tmp = ppd->link_enabled;
10136
10137 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10138 if (ret) {
10139 ppd->link_enabled = tmp;
10140 break;
10141 }
10142 ppd->remote_link_down_reason = 0;
10143
10144 if (ppd->driver_link_ready)
10145 ppd->link_enabled = 1;
10146 }
10147
Jim Snowfb9036d2016-01-11 18:32:21 -050010148 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010149 ret = set_local_link_attributes(ppd);
10150 if (ret)
10151 break;
10152
10153 ppd->port_error_action = 0;
10154 ppd->host_link_state = HLS_DN_POLL;
10155
10156 if (quick_linkup) {
10157 /* quick linkup does not go into polling */
10158 ret = do_quick_linkup(dd);
10159 } else {
10160 ret1 = set_physical_link_state(dd, PLS_POLLING);
10161 if (ret1 != HCMD_SUCCESS) {
10162 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010163 "Failed to transition to Polling link state, return 0x%x\n",
10164 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010165 ret = -EINVAL;
10166 }
10167 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010168 ppd->offline_disabled_reason =
10169 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010170 /*
10171 * If an error occurred above, go back to offline. The
10172 * caller may reschedule another attempt.
10173 */
10174 if (ret)
10175 goto_offline(ppd, 0);
10176 break;
10177 case HLS_DN_DISABLE:
10178 /* link is disabled */
10179 ppd->link_enabled = 0;
10180
10181 /* allow any state to transition to disabled */
10182
10183 /* must transition to offline first */
10184 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10185 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10186 if (ret)
10187 break;
10188 ppd->remote_link_down_reason = 0;
10189 }
10190
10191 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10192 if (ret1 != HCMD_SUCCESS) {
10193 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010194 "Failed to transition to Disabled link state, return 0x%x\n",
10195 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010196 ret = -EINVAL;
10197 break;
10198 }
10199 ppd->host_link_state = HLS_DN_DISABLE;
10200 dc_shutdown(dd);
10201 break;
10202 case HLS_DN_OFFLINE:
10203 if (ppd->host_link_state == HLS_DN_DISABLE)
10204 dc_start(dd);
10205
10206 /* allow any state to transition to offline */
10207 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10208 if (!ret)
10209 ppd->remote_link_down_reason = 0;
10210 break;
10211 case HLS_VERIFY_CAP:
10212 if (ppd->host_link_state != HLS_DN_POLL)
10213 goto unexpected;
10214 ppd->host_link_state = HLS_VERIFY_CAP;
10215 break;
10216 case HLS_GOING_UP:
10217 if (ppd->host_link_state != HLS_VERIFY_CAP)
10218 goto unexpected;
10219
10220 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10221 if (ret1 != HCMD_SUCCESS) {
10222 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010223 "Failed to transition to link up state, return 0x%x\n",
10224 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010225 ret = -EINVAL;
10226 break;
10227 }
10228 ppd->host_link_state = HLS_GOING_UP;
10229 break;
10230
10231 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10232 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10233 default:
10234 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010235 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010236 ret = -EINVAL;
10237 break;
10238 }
10239
10240 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10241 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10242
10243 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10244 ppd->neigh_link_down_reason.sma == 0) {
10245 ppd->local_link_down_reason.sma =
10246 ppd->local_link_down_reason.latest;
10247 ppd->neigh_link_down_reason.sma =
10248 ppd->neigh_link_down_reason.latest;
10249 }
10250
10251 goto done;
10252
10253unexpected:
10254 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010255 __func__, link_state_name(ppd->host_link_state),
10256 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010257 ret = -EINVAL;
10258
10259done:
10260 mutex_unlock(&ppd->hls_lock);
10261
10262 if (event.device)
10263 ib_dispatch_event(&event);
10264
10265 return ret;
10266}
10267
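/*
 * Apply one IB config item (HFI1_IB_CFG_*) to the port. Enable-type
 * values are ANDed with what the hardware supports; values HFI does
 * not enforce are saved so they can be reported later.
 */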
10268int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10269{
10270 u64 reg;
10271 int ret = 0;
10272
10273 switch (which) {
10274 case HFI1_IB_CFG_LIDLMC:
10275 set_lidlmc(ppd);
10276 break;
10277 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10278 /*
10279 * The VL Arbitrator high limit is sent in units of 4k
10280 * bytes, while HFI stores it in units of 64 bytes.
10281 */
Jubin John8638b772016-02-14 20:19:24 -080010282 val *= 4096 / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010283 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10284 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10285 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10286 break;
10287 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10288 /* HFI only supports POLL as the default link down state */
10289 if (val != HLS_DN_POLL)
10290 ret = -EINVAL;
10291 break;
10292 case HFI1_IB_CFG_OP_VLS:
10293 if (ppd->vls_operational != val) {
10294 ppd->vls_operational = val;
10295 if (!ppd->port)
10296 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010297 }
10298 break;
10299 /*
10300 * For link width, link width downgrade, and speed enable, always AND
10301 * the setting with what is actually supported. This has two benefits.
10302 * First, enabled can't have unsupported values, no matter what the
10303 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10304 * "fill in with your supported value" have all the bits in the
10305 * field set, so simply ANDing with supported has the desired result.
10306 */
10307 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10308 ppd->link_width_enabled = val & ppd->link_width_supported;
10309 break;
10310 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10311 ppd->link_width_downgrade_enabled =
10312 val & ppd->link_width_downgrade_supported;
10313 break;
10314 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10315 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10316 break;
10317 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10318 /*
10319 * HFI does not follow IB specs, save this value
10320 * so we can report it, if asked.
10321 */
10322 ppd->overrun_threshold = val;
10323 break;
10324 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10325 /*
10326 * HFI does not follow IB specs, save this value
10327 * so we can report it, if asked.
10328 */
10329 ppd->phy_error_threshold = val;
10330 break;
10331
10332 case HFI1_IB_CFG_MTU:
10333 set_send_length(ppd);
10334 break;
10335
10336 case HFI1_IB_CFG_PKEYS:
10337 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10338 set_partition_keys(ppd);
10339 break;
10340
10341 default:
10342 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10343 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010344 "%s: which %s, val 0x%x: not implemented\n",
10345 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010346 break;
10347 }
10348 return ret;
10349}
10350
10351/* begin functions related to vl arbitration table caching */
10352static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10353{
10354 int i;
10355
10356 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10357 VL_ARB_LOW_PRIO_TABLE_SIZE);
10358 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10359 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10360
10361 /*
10362 * Note that we always return values directly from the
10363 * 'vl_arb_cache' (and do no CSR reads) in response to a
10364 * 'Get(VLArbTable)'. This is obviously correct after a
10365 * 'Set(VLArbTable)', since the cache will then be up to
10366 * date. But it's also correct prior to any 'Set(VLArbTable)'
10367 * since then both the cache, and the relevant h/w registers
10368 * will be zeroed.
10369 */
10370
10371 for (i = 0; i < MAX_PRIO_TABLE; i++)
10372 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10373}
10374
10375/*
10376 * vl_arb_lock_cache
10377 *
10378 * All other vl_arb_* functions should be called only after locking
10379 * the cache.
10380 */
10381static inline struct vl_arb_cache *
10382vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10383{
10384 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10385 return NULL;
10386 spin_lock(&ppd->vl_arb_cache[idx].lock);
10387 return &ppd->vl_arb_cache[idx];
10388}
10389
10390static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10391{
10392 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10393}
10394
10395static void vl_arb_get_cache(struct vl_arb_cache *cache,
10396 struct ib_vl_weight_elem *vl)
10397{
10398 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10399}
10400
10401static void vl_arb_set_cache(struct vl_arb_cache *cache,
10402 struct ib_vl_weight_elem *vl)
10403{
10404 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10405}
10406
10407static int vl_arb_match_cache(struct vl_arb_cache *cache,
10408 struct ib_vl_weight_elem *vl)
10409{
10410 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10411}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010412
Mike Marciniszyn77241052015-07-30 15:17:43 -040010413/* end functions related to vl arbitration table caching */
10414
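/*
 * Write the VL arbitration weights to the low or high priority table
 * selected by @target. If the link is up (and not A0 hardware), the
 * data VLs are stopped and drained first so a VL whose weight drops
 * to 0 cannot strand packets in its FIFO.
 */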
10415static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10416 u32 size, struct ib_vl_weight_elem *vl)
10417{
10418 struct hfi1_devdata *dd = ppd->dd;
10419 u64 reg;
10420 unsigned int i, is_up = 0;
10421 int drain, ret = 0;
10422
10423 mutex_lock(&ppd->hls_lock);
10424
10425 if (ppd->host_link_state & HLS_UP)
10426 is_up = 1;
10427
10428 drain = !is_ax(dd) && is_up;
10429
10430 if (drain)
10431 /*
10432 * Before adjusting VL arbitration weights, empty per-VL
10433 * FIFOs, otherwise a packet whose VL weight is being
10434 * set to 0 could get stuck in a FIFO with no chance to
10435 * egress.
10436 */
10437 ret = stop_drain_data_vls(dd);
10438
10439 if (ret) {
10440 dd_dev_err(
10441 dd,
10442 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10443 __func__);
10444 goto err;
10445 }
10446
10447 for (i = 0; i < size; i++, vl++) {
10448 /*
10449 * NOTE: The low priority shift and mask are used here, but
10450 * they are the same for both the low and high registers.
10451 */
10452 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10453 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10454 | (((u64)vl->weight
10455 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10456 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10457 write_csr(dd, target + (i * 8), reg);
10458 }
10459 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10460
10461 if (drain)
10462 open_fill_data_vls(dd); /* reopen all VLs */
10463
10464err:
10465 mutex_unlock(&ppd->hls_lock);
10466
10467 return ret;
10468}
10469
10470/*
10471 * Read one credit merge VL register.
10472 */
10473static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10474 struct vl_limit *vll)
10475{
10476 u64 reg = read_csr(dd, csr);
10477
10478 vll->dedicated = cpu_to_be16(
10479 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10480 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10481 vll->shared = cpu_to_be16(
10482 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10483 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10484}
10485
10486/*
10487 * Read the current credit merge limits.
10488 */
10489static int get_buffer_control(struct hfi1_devdata *dd,
10490 struct buffer_control *bc, u16 *overall_limit)
10491{
10492 u64 reg;
10493 int i;
10494
10495 /* not all entries are filled in */
10496 memset(bc, 0, sizeof(*bc));
10497
10498 /* OPA and HFI have a 1-1 mapping */
10499 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080010500 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010501
10502 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10503 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10504
10505 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10506 bc->overall_shared_limit = cpu_to_be16(
10507 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10508 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10509 if (overall_limit)
10510 *overall_limit = (reg
10511 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10512 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10513 return sizeof(struct buffer_control);
10514}
10515
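/*
 * Unpack the 4-bit SC-to-VLnt entries from the two DC table CSRs into
 * @dp. Returns sizeof(struct sc2vlnt).
 */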
10516static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10517{
10518 u64 reg;
10519 int i;
10520
10521 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10522 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10523 for (i = 0; i < sizeof(u64); i++) {
10524 u8 byte = *(((u8 *)&reg) + i);
10525
10526 dp->vlnt[2 * i] = byte & 0xf;
10527 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10528 }
10529
10530 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10531 for (i = 0; i < sizeof(u64); i++) {
10532 u8 byte = *(((u8 *)&reg) + i);
10533
10534 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10535 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10536 }
10537 return sizeof(struct sc2vlnt);
10538}
10539
10540static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10541 struct ib_vl_weight_elem *vl)
10542{
10543 unsigned int i;
10544
10545 for (i = 0; i < nelems; i++, vl++) {
10546 vl->vl = 0xf;
10547 vl->weight = 0;
10548 }
10549}
10550
10551static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10552{
10553 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080010554 DC_SC_VL_VAL(15_0,
10555 0, dp->vlnt[0] & 0xf,
10556 1, dp->vlnt[1] & 0xf,
10557 2, dp->vlnt[2] & 0xf,
10558 3, dp->vlnt[3] & 0xf,
10559 4, dp->vlnt[4] & 0xf,
10560 5, dp->vlnt[5] & 0xf,
10561 6, dp->vlnt[6] & 0xf,
10562 7, dp->vlnt[7] & 0xf,
10563 8, dp->vlnt[8] & 0xf,
10564 9, dp->vlnt[9] & 0xf,
10565 10, dp->vlnt[10] & 0xf,
10566 11, dp->vlnt[11] & 0xf,
10567 12, dp->vlnt[12] & 0xf,
10568 13, dp->vlnt[13] & 0xf,
10569 14, dp->vlnt[14] & 0xf,
10570 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010571 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080010572 DC_SC_VL_VAL(31_16,
10573 16, dp->vlnt[16] & 0xf,
10574 17, dp->vlnt[17] & 0xf,
10575 18, dp->vlnt[18] & 0xf,
10576 19, dp->vlnt[19] & 0xf,
10577 20, dp->vlnt[20] & 0xf,
10578 21, dp->vlnt[21] & 0xf,
10579 22, dp->vlnt[22] & 0xf,
10580 23, dp->vlnt[23] & 0xf,
10581 24, dp->vlnt[24] & 0xf,
10582 25, dp->vlnt[25] & 0xf,
10583 26, dp->vlnt[26] & 0xf,
10584 27, dp->vlnt[27] & 0xf,
10585 28, dp->vlnt[28] & 0xf,
10586 29, dp->vlnt[29] & 0xf,
10587 30, dp->vlnt[30] & 0xf,
10588 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010589}
10590
10591static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10592 u16 limit)
10593{
10594 if (limit != 0)
10595 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010596 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010597}
10598
 10599 /* change only the shared limit portion of SendCmGlobalCredit */
10600static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10601{
10602 u64 reg;
10603
10604 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10605 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10606 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10607 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10608}
10609
 10610 /* change only the total credit limit portion of SendCmGlobalCredit */
10611static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10612{
10613 u64 reg;
10614
10615 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10616 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10617 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10618 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10619}
10620
10621/* set the given per-VL shared limit */
10622static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10623{
10624 u64 reg;
10625 u32 addr;
10626
10627 if (vl < TXE_NUM_DATA_VL)
10628 addr = SEND_CM_CREDIT_VL + (8 * vl);
10629 else
10630 addr = SEND_CM_CREDIT_VL15;
10631
10632 reg = read_csr(dd, addr);
10633 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10634 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10635 write_csr(dd, addr, reg);
10636}
10637
10638/* set the given per-VL dedicated limit */
10639static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10640{
10641 u64 reg;
10642 u32 addr;
10643
10644 if (vl < TXE_NUM_DATA_VL)
10645 addr = SEND_CM_CREDIT_VL + (8 * vl);
10646 else
10647 addr = SEND_CM_CREDIT_VL15;
10648
10649 reg = read_csr(dd, addr);
10650 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10651 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10652 write_csr(dd, addr, reg);
10653}
10654
10655/* spin until the given per-VL status mask bits clear */
10656static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10657 const char *which)
10658{
10659 unsigned long timeout;
10660 u64 reg;
10661
10662 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10663 while (1) {
10664 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10665
10666 if (reg == 0)
10667 return; /* success */
10668 if (time_after(jiffies, timeout))
10669 break; /* timed out */
10670 udelay(1);
10671 }
10672
10673 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010674 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10675 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010676 /*
10677 * If this occurs, it is likely there was a credit loss on the link.
10678 * The only recovery from that is a link bounce.
10679 */
10680 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010681 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010682}
10683
10684/*
10685 * The number of credits on the VLs may be changed while everything
10686 * is "live", but the following algorithm must be followed due to
10687 * how the hardware is actually implemented. In particular,
10688 * Return_Credit_Status[] is the only correct status check.
10689 *
10690 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10691 * set Global_Shared_Credit_Limit = 0
10692 * use_all_vl = 1
10693 * mask0 = all VLs that are changing either dedicated or shared limits
10694 * set Shared_Limit[mask0] = 0
10695 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10696 * if (changing any dedicated limit)
10697 * mask1 = all VLs that are lowering dedicated limits
10698 * lower Dedicated_Limit[mask1]
10699 * spin until Return_Credit_Status[mask1] == 0
10700 * raise Dedicated_Limits
10701 * raise Shared_Limits
10702 * raise Global_Shared_Credit_Limit
10703 *
10704 * lower = if the new limit is lower, set the limit to the new value
10705 * raise = if the new limit is higher than the current value (may be changed
10706 * earlier in the algorithm), set the new limit to the new value
10707 */
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010708int set_buffer_control(struct hfi1_pportdata *ppd,
10709 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040010710{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010711 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010712 u64 changing_mask, ld_mask, stat_mask;
10713 int change_count;
10714 int i, use_all_mask;
10715 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010716 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010717 /*
10718 * A0: add the variable any_shared_limit_changing below and in the
10719 * algorithm above. If removing A0 support, it can be removed.
10720 */
10721 int any_shared_limit_changing;
10722 struct buffer_control cur_bc;
10723 u8 changing[OPA_MAX_VLS];
10724 u8 lowering_dedicated[OPA_MAX_VLS];
10725 u16 cur_total;
10726 u32 new_total = 0;
10727 const u64 all_mask =
10728 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10729 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10730 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10731 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10732 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10733 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10734 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10735 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10736 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10737
10738#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10739#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10740
Mike Marciniszyn77241052015-07-30 15:17:43 -040010741 /* find the new total credits, do sanity check on unused VLs */
10742 for (i = 0; i < OPA_MAX_VLS; i++) {
10743 if (valid_vl(i)) {
10744 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10745 continue;
10746 }
10747 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080010748 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010749 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080010750 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010751 new_bc->vl[i].dedicated = 0;
10752 new_bc->vl[i].shared = 0;
10753 }
10754 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050010755
Mike Marciniszyn77241052015-07-30 15:17:43 -040010756 /* fetch the current values */
10757 get_buffer_control(dd, &cur_bc, &cur_total);
10758
10759 /*
10760 * Create the masks we will use.
10761 */
10762 memset(changing, 0, sizeof(changing));
10763 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080010764 /*
10765 * NOTE: Assumes that the individual VL bits are adjacent and in
10766 * increasing order
10767 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010768 stat_mask =
10769 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10770 changing_mask = 0;
10771 ld_mask = 0;
10772 change_count = 0;
10773 any_shared_limit_changing = 0;
10774 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10775 if (!valid_vl(i))
10776 continue;
10777 this_shared_changing = new_bc->vl[i].shared
10778 != cur_bc.vl[i].shared;
10779 if (this_shared_changing)
10780 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080010781 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10782 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010783 changing[i] = 1;
10784 changing_mask |= stat_mask;
10785 change_count++;
10786 }
10787 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10788 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10789 lowering_dedicated[i] = 1;
10790 ld_mask |= stat_mask;
10791 }
10792 }
10793
10794 /* bracket the credit change with a total adjustment */
10795 if (new_total > cur_total)
10796 set_global_limit(dd, new_total);
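	/*
	 * Raising the global limit first here, and lowering it only at the
	 * end of this routine, keeps the global limit at or above both the
	 * old and the new credit totals for the whole transition.
	 */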
10797
10798 /*
10799 * Start the credit change algorithm.
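	 *
	 * Rough shape, as implemented below: shared limits for changing VLs
	 * are dropped to zero first, dedicated limits that are decreasing
	 * are lowered next (waiting for in-flight credits to return at each
	 * step), and only then are the dedicated, per-VL shared, and global
	 * shared limits raised to their new values.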
10800 */
10801 use_all_mask = 0;
10802 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010803 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10804 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010805 set_global_shared(dd, 0);
10806 cur_bc.overall_shared_limit = 0;
10807 use_all_mask = 1;
10808 }
10809
10810 for (i = 0; i < NUM_USABLE_VLS; i++) {
10811 if (!valid_vl(i))
10812 continue;
10813
10814 if (changing[i]) {
10815 set_vl_shared(dd, i, 0);
10816 cur_bc.vl[i].shared = 0;
10817 }
10818 }
10819
10820 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080010821 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010822
10823 if (change_count > 0) {
10824 for (i = 0; i < NUM_USABLE_VLS; i++) {
10825 if (!valid_vl(i))
10826 continue;
10827
10828 if (lowering_dedicated[i]) {
10829 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080010830 be16_to_cpu(new_bc->
10831 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010832 cur_bc.vl[i].dedicated =
10833 new_bc->vl[i].dedicated;
10834 }
10835 }
10836
10837 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10838
10839 /* now raise all dedicated that are going up */
10840 for (i = 0; i < NUM_USABLE_VLS; i++) {
10841 if (!valid_vl(i))
10842 continue;
10843
10844 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10845 be16_to_cpu(cur_bc.vl[i].dedicated))
10846 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080010847 be16_to_cpu(new_bc->
10848 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010849 }
10850 }
10851
10852 /* next raise all shared that are going up */
10853 for (i = 0; i < NUM_USABLE_VLS; i++) {
10854 if (!valid_vl(i))
10855 continue;
10856
10857 if (be16_to_cpu(new_bc->vl[i].shared) >
10858 be16_to_cpu(cur_bc.vl[i].shared))
10859 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10860 }
10861
10862 /* finally raise the global shared */
10863 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080010864 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010865 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010866 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010867
10868 /* bracket the credit change with a total adjustment */
10869 if (new_total < cur_total)
10870 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010871
10872 /*
	 * Determine the actual number of operational VLs using the number of
10874 * dedicated and shared credits for each VL.
10875 */
10876 if (change_count > 0) {
10877 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10878 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10879 be16_to_cpu(new_bc->vl[i].shared) > 0)
10880 vl_count++;
10881 ppd->actual_vls_operational = vl_count;
10882 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
10883 ppd->actual_vls_operational :
10884 ppd->vls_operational,
10885 NULL);
10886 if (ret == 0)
10887 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
10888 ppd->actual_vls_operational :
10889 ppd->vls_operational, NULL);
10890 if (ret)
10891 return ret;
10892 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040010893 return 0;
10894}
10895
10896/*
10897 * Read the given fabric manager table. Return the size of the
10898 * table (in bytes) on success, and a negative error code on
10899 * failure.
10900 */
10901int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10902
10903{
10904 int size;
10905 struct vl_arb_cache *vlc;
10906
10907 switch (which) {
10908 case FM_TBL_VL_HIGH_ARB:
10909 size = 256;
10910 /*
10911 * OPA specifies 128 elements (of 2 bytes each), though
10912 * HFI supports only 16 elements in h/w.
10913 */
10914 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10915 vl_arb_get_cache(vlc, t);
10916 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10917 break;
10918 case FM_TBL_VL_LOW_ARB:
10919 size = 256;
10920 /*
10921 * OPA specifies 128 elements (of 2 bytes each), though
10922 * HFI supports only 16 elements in h/w.
10923 */
10924 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10925 vl_arb_get_cache(vlc, t);
10926 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10927 break;
10928 case FM_TBL_BUFFER_CONTROL:
10929 size = get_buffer_control(ppd->dd, t, NULL);
10930 break;
10931 case FM_TBL_SC2VLNT:
10932 size = get_sc2vlnt(ppd->dd, t);
10933 break;
10934 case FM_TBL_VL_PREEMPT_ELEMS:
10935 size = 256;
10936 /* OPA specifies 128 elements, of 2 bytes each */
10937 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10938 break;
10939 case FM_TBL_VL_PREEMPT_MATRIX:
10940 size = 256;
10941 /*
10942 * OPA specifies that this is the same size as the VL
10943 * arbitration tables (i.e., 256 bytes).
10944 */
10945 break;
10946 default:
10947 return -EINVAL;
10948 }
10949 return size;
10950}
10951
10952/*
10953 * Write the given fabric manager table.
10954 */
10955int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10956{
10957 int ret = 0;
10958 struct vl_arb_cache *vlc;
10959
10960 switch (which) {
10961 case FM_TBL_VL_HIGH_ARB:
10962 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10963 if (vl_arb_match_cache(vlc, t)) {
10964 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10965 break;
10966 }
10967 vl_arb_set_cache(vlc, t);
10968 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10969 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10970 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10971 break;
10972 case FM_TBL_VL_LOW_ARB:
10973 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10974 if (vl_arb_match_cache(vlc, t)) {
10975 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10976 break;
10977 }
10978 vl_arb_set_cache(vlc, t);
10979 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10980 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10981 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10982 break;
10983 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080010984 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010985 break;
10986 case FM_TBL_SC2VLNT:
10987 set_sc2vlnt(ppd->dd, t);
10988 break;
10989 default:
10990 ret = -EINVAL;
10991 }
10992 return ret;
10993}
10994
10995/*
10996 * Disable all data VLs.
10997 *
10998 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10999 */
11000static int disable_data_vls(struct hfi1_devdata *dd)
11001{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011002 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011003 return 1;
11004
11005 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11006
11007 return 0;
11008}
11009
11010/*
11011 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11012 * Just re-enables all data VLs (the "fill" part happens
11013 * automatically - the name was chosen for symmetry with
11014 * stop_drain_data_vls()).
11015 *
11016 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11017 */
11018int open_fill_data_vls(struct hfi1_devdata *dd)
11019{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011020 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011021 return 1;
11022
11023 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11024
11025 return 0;
11026}
11027
11028/*
11029 * drain_data_vls() - assumes that disable_data_vls() has been called,
 * then waits for the occupancy (of per-VL FIFOs) of all contexts and
 * SDMA engines to drop to 0.
11032 */
11033static void drain_data_vls(struct hfi1_devdata *dd)
11034{
11035 sc_wait(dd);
11036 sdma_wait(dd);
11037 pause_for_credit_return(dd);
11038}
11039
11040/*
11041 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11042 *
11043 * Use open_fill_data_vls() to resume using data VLs. This pair is
11044 * meant to be used like this:
11045 *
11046 * stop_drain_data_vls(dd);
11047 * // do things with per-VL resources
11048 * open_fill_data_vls(dd);
11049 */
11050int stop_drain_data_vls(struct hfi1_devdata *dd)
11051{
11052 int ret;
11053
11054 ret = disable_data_vls(dd);
11055 if (ret == 0)
11056 drain_data_vls(dd);
11057
11058 return ret;
11059}
11060
11061/*
11062 * Convert a nanosecond time to a cclock count. No matter how slow
11063 * the cclock, a non-zero ns will always have a non-zero result.
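 *
 * Worked example (hypothetical cclock period of 2000 ps): 100 ns maps to
 * (100 * 1000) / 2000 = 50 cclocks, while 1 ns maps to 0 by the division
 * and is then rounded up to 1 below.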
11064 */
11065u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11066{
11067 u32 cclocks;
11068
11069 if (dd->icode == ICODE_FPGA_EMULATION)
11070 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11071 else /* simulation pretends to be ASIC */
11072 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11073 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11074 cclocks = 1;
11075 return cclocks;
11076}
11077
11078/*
 * Convert a cclock count to nanoseconds. No matter how slow
 * the cclock, a non-zero cclock count will always have a non-zero result.
11081 */
11082u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11083{
11084 u32 ns;
11085
11086 if (dd->icode == ICODE_FPGA_EMULATION)
11087 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11088 else /* simulation pretends to be ASIC */
11089 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11090 if (cclocks && !ns)
11091 ns = 1;
11092 return ns;
11093}
11094
11095/*
11096 * Dynamically adjust the receive interrupt timeout for a context based on
11097 * incoming packet rate.
11098 *
11099 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11100 */
11101static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11102{
11103 struct hfi1_devdata *dd = rcd->dd;
11104 u32 timeout = rcd->rcvavail_timeout;
11105
11106 /*
	 * This algorithm doubles or halves the timeout depending on whether
	 * the number of packets received in this interrupt was less than,
	 * or greater than or equal to, the interrupt count.
11110 *
11111 * The calculations below do not allow a steady state to be achieved.
	 * Only at the endpoints is it possible to have an unchanging
11113 * timeout.
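	 *
	 * For example, with a hypothetical rcv_intr_count of 16: receiving 4
	 * packets in one interrupt halves the timeout, while receiving 20
	 * packets doubles it (capped at dd->rcv_intr_timeout_csr).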
11114 */
11115 if (npkts < rcv_intr_count) {
11116 /*
11117 * Not enough packets arrived before the timeout, adjust
11118 * timeout downward.
11119 */
11120 if (timeout < 2) /* already at minimum? */
11121 return;
11122 timeout >>= 1;
11123 } else {
11124 /*
11125 * More than enough packets arrived before the timeout, adjust
11126 * timeout upward.
11127 */
11128 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11129 return;
11130 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11131 }
11132
11133 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011134 /*
11135 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11136 * been verified to be in range
11137 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011138 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011139 (u64)timeout <<
11140 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011141}
11142
11143void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11144 u32 intr_adjust, u32 npkts)
11145{
11146 struct hfi1_devdata *dd = rcd->dd;
11147 u64 reg;
11148 u32 ctxt = rcd->ctxt;
11149
11150 /*
11151 * Need to write timeout register before updating RcvHdrHead to ensure
11152 * that a new value is used when the HW decides to restart counting.
11153 */
11154 if (intr_adjust)
11155 adjust_rcv_timeout(rcd, npkts);
11156 if (updegr) {
11157 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11158 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11159 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11160 }
11161 mmiowb();
11162 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11163 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11164 << RCV_HDR_HEAD_HEAD_SHIFT);
11165 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11166 mmiowb();
11167}
11168
11169u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11170{
11171 u32 head, tail;
11172
11173 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11174 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11175
11176 if (rcd->rcvhdrtail_kvaddr)
11177 tail = get_rcvhdrtail(rcd);
11178 else
11179 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11180
11181 return head == tail;
11182}
11183
11184/*
11185 * Context Control and Receive Array encoding for buffer size:
11186 * 0x0 invalid
11187 * 0x1 4 KB
11188 * 0x2 8 KB
11189 * 0x3 16 KB
11190 * 0x4 32 KB
11191 * 0x5 64 KB
11192 * 0x6 128 KB
11193 * 0x7 256 KB
11194 * 0x8 512 KB (Receive Array only)
11195 * 0x9 1 MB (Receive Array only)
11196 * 0xa 2 MB (Receive Array only)
11197 *
11198 * 0xB-0xF - reserved (Receive Array only)
11199 *
11200 *
11201 * This routine assumes that the value has already been sanity checked.
11202 */
11203static u32 encoded_size(u32 size)
11204{
11205 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011206 case 4 * 1024: return 0x1;
11207 case 8 * 1024: return 0x2;
11208 case 16 * 1024: return 0x3;
11209 case 32 * 1024: return 0x4;
11210 case 64 * 1024: return 0x5;
11211 case 128 * 1024: return 0x6;
11212 case 256 * 1024: return 0x7;
11213 case 512 * 1024: return 0x8;
11214 case 1 * 1024 * 1024: return 0x9;
11215 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011216 }
11217 return 0x1; /* if invalid, go with the minimum size */
11218}
11219
11220void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11221{
11222 struct hfi1_ctxtdata *rcd;
11223 u64 rcvctrl, reg;
11224 int did_enable = 0;
11225
11226 rcd = dd->rcd[ctxt];
11227 if (!rcd)
11228 return;
11229
11230 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11231
11232 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11233 /* if the context already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011234 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11235 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011236 /* reset the tail and hdr addresses, and sequence count */
11237 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11238 rcd->rcvhdrq_phys);
11239 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11240 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11241 rcd->rcvhdrqtailaddr_phys);
11242 rcd->seq_cnt = 1;
11243
11244 /* reset the cached receive header queue head value */
11245 rcd->head = 0;
11246
11247 /*
11248 * Zero the receive header queue so we don't get false
11249 * positives when checking the sequence number. The
11250 * sequence numbers could land exactly on the same spot.
11251 * E.g. a rcd restart before the receive header wrapped.
11252 */
11253 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11254
11255 /* starting timeout */
11256 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11257
11258 /* enable the context */
11259 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11260
11261 /* clean the egr buffer size first */
11262 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11263 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11264 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11265 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11266
11267 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11268 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11269 did_enable = 1;
11270
11271 /* zero RcvEgrIndexHead */
11272 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11273
11274 /* set eager count and base index */
11275 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11276 & RCV_EGR_CTRL_EGR_CNT_MASK)
11277 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11278 (((rcd->eager_base >> RCV_SHIFT)
11279 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11280 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11281 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11282
11283 /*
11284 * Set TID (expected) count and base index.
11285 * rcd->expected_count is set to individual RcvArray entries,
11286 * not pairs, and the CSR takes a pair-count in groups of
11287 * four, so divide by 8.
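		 *
		 * For example (hypothetical value), an expected_count of 2048
		 * RcvArray entries programs a pair-count field of
		 * 2048 / 8 = 256.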
11288 */
11289 reg = (((rcd->expected_count >> RCV_SHIFT)
11290 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11291 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11292 (((rcd->expected_base >> RCV_SHIFT)
11293 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11294 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11295 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011296 if (ctxt == HFI1_CTRL_CTXT)
11297 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011298 }
11299 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11300 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011301 /*
11302 * When receive context is being disabled turn on tail
11303 * update with a dummy tail address and then disable
11304 * receive context.
11305 */
11306 if (dd->rcvhdrtail_dummy_physaddr) {
11307 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11308 dd->rcvhdrtail_dummy_physaddr);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011309 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011310 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11311 }
11312
Mike Marciniszyn77241052015-07-30 15:17:43 -040011313 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11314 }
11315 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11316 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11317 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11318 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11319 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11320 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011321 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11322 /* See comment on RcvCtxtCtrl.TailUpd above */
11323 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11324 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11325 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011326 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11327 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11328 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11329 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11330 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011331 /*
11332 * In one-packet-per-eager mode, the size comes from
11333 * the RcvArray entry.
11334 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011335 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11336 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11337 }
11338 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11339 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11340 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11341 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11342 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11343 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11344 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11345 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11346 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11347 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11348 rcd->rcvctrl = rcvctrl;
11349 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11350 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11351
11352 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011353 if (did_enable &&
11354 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011355 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11356 if (reg != 0) {
11357 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011358 ctxt, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011359 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11360 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11361 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11362 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11363 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11364 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011365 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011366 }
11367 }
11368
11369 if (did_enable) {
11370 /*
11371 * The interrupt timeout and count must be set after
11372 * the context is enabled to take effect.
11373 */
11374 /* set interrupt timeout */
11375 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011376 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011377 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11378
11379 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11380 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11381 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11382 }
11383
11384 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11385 /*
11386 * If the context has been disabled and the Tail Update has
		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
		 * so it doesn't contain an invalid address.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011389 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011390 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11391 dd->rcvhdrtail_dummy_physaddr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011392}
11393
11394u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11395 u64 **cntrp)
11396{
11397 int ret;
11398 u64 val = 0;
11399
11400 if (namep) {
11401 ret = dd->cntrnameslen;
11402 if (pos != 0) {
11403 dd_dev_err(dd, "read_cntrs does not support indexing");
11404 return 0;
11405 }
11406 *namep = dd->cntrnames;
11407 } else {
11408 const struct cntr_entry *entry;
11409 int i, j;
11410
11411 ret = (dd->ndevcntrs) * sizeof(u64);
11412 if (pos != 0) {
11413 dd_dev_err(dd, "read_cntrs does not support indexing");
11414 return 0;
11415 }
11416
11417 /* Get the start of the block of counters */
11418 *cntrp = dd->cntrs;
11419
11420 /*
11421 * Now go and fill in each counter in the block.
11422 */
11423 for (i = 0; i < DEV_CNTR_LAST; i++) {
11424 entry = &dev_cntrs[i];
11425 hfi1_cdbg(CNTR, "reading %s", entry->name);
11426 if (entry->flags & CNTR_DISABLED) {
11427 /* Nothing */
11428 hfi1_cdbg(CNTR, "\tDisabled\n");
11429 } else {
11430 if (entry->flags & CNTR_VL) {
11431 hfi1_cdbg(CNTR, "\tPer VL\n");
11432 for (j = 0; j < C_VL_COUNT; j++) {
11433 val = entry->rw_cntr(entry,
11434 dd, j,
11435 CNTR_MODE_R,
11436 0);
11437 hfi1_cdbg(
11438 CNTR,
11439 "\t\tRead 0x%llx for %d\n",
11440 val, j);
11441 dd->cntrs[entry->offset + j] =
11442 val;
11443 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011444 } else if (entry->flags & CNTR_SDMA) {
11445 hfi1_cdbg(CNTR,
11446 "\t Per SDMA Engine\n");
11447 for (j = 0; j < dd->chip_sdma_engines;
11448 j++) {
11449 val =
11450 entry->rw_cntr(entry, dd, j,
11451 CNTR_MODE_R, 0);
11452 hfi1_cdbg(CNTR,
11453 "\t\tRead 0x%llx for %d\n",
11454 val, j);
11455 dd->cntrs[entry->offset + j] =
11456 val;
11457 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011458 } else {
11459 val = entry->rw_cntr(entry, dd,
11460 CNTR_INVALID_VL,
11461 CNTR_MODE_R, 0);
11462 dd->cntrs[entry->offset] = val;
11463 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11464 }
11465 }
11466 }
11467 }
11468 return ret;
11469}
11470
11471/*
11472 * Used by sysfs to create files for hfi stats to read
11473 */
11474u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11475 char **namep, u64 **cntrp)
11476{
11477 int ret;
11478 u64 val = 0;
11479
11480 if (namep) {
11481 ret = dd->portcntrnameslen;
11482 if (pos != 0) {
11483 dd_dev_err(dd, "index not supported");
11484 return 0;
11485 }
11486 *namep = dd->portcntrnames;
11487 } else {
11488 const struct cntr_entry *entry;
11489 struct hfi1_pportdata *ppd;
11490 int i, j;
11491
11492 ret = (dd->nportcntrs) * sizeof(u64);
11493 if (pos != 0) {
11494 dd_dev_err(dd, "indexing not supported");
11495 return 0;
11496 }
11497 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11498 *cntrp = ppd->cntrs;
11499
11500 for (i = 0; i < PORT_CNTR_LAST; i++) {
11501 entry = &port_cntrs[i];
11502 hfi1_cdbg(CNTR, "reading %s", entry->name);
11503 if (entry->flags & CNTR_DISABLED) {
11504 /* Nothing */
11505 hfi1_cdbg(CNTR, "\tDisabled\n");
11506 continue;
11507 }
11508
11509 if (entry->flags & CNTR_VL) {
11510 hfi1_cdbg(CNTR, "\tPer VL");
11511 for (j = 0; j < C_VL_COUNT; j++) {
11512 val = entry->rw_cntr(entry, ppd, j,
11513 CNTR_MODE_R,
11514 0);
11515 hfi1_cdbg(
11516 CNTR,
11517 "\t\tRead 0x%llx for %d",
11518 val, j);
11519 ppd->cntrs[entry->offset + j] = val;
11520 }
11521 } else {
11522 val = entry->rw_cntr(entry, ppd,
11523 CNTR_INVALID_VL,
11524 CNTR_MODE_R,
11525 0);
11526 ppd->cntrs[entry->offset] = val;
11527 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11528 }
11529 }
11530 }
11531 return ret;
11532}
11533
11534static void free_cntrs(struct hfi1_devdata *dd)
11535{
11536 struct hfi1_pportdata *ppd;
11537 int i;
11538
11539 if (dd->synth_stats_timer.data)
11540 del_timer_sync(&dd->synth_stats_timer);
11541 dd->synth_stats_timer.data = 0;
11542 ppd = (struct hfi1_pportdata *)(dd + 1);
11543 for (i = 0; i < dd->num_pports; i++, ppd++) {
11544 kfree(ppd->cntrs);
11545 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011546 free_percpu(ppd->ibport_data.rvp.rc_acks);
11547 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11548 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011549 ppd->cntrs = NULL;
11550 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011551 ppd->ibport_data.rvp.rc_acks = NULL;
11552 ppd->ibport_data.rvp.rc_qacks = NULL;
11553 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011554 }
11555 kfree(dd->portcntrnames);
11556 dd->portcntrnames = NULL;
11557 kfree(dd->cntrs);
11558 dd->cntrs = NULL;
11559 kfree(dd->scntrs);
11560 dd->scntrs = NULL;
11561 kfree(dd->cntrnames);
11562 dd->cntrnames = NULL;
11563}
11564
11565#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11566#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11567
11568static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11569 u64 *psval, void *context, int vl)
11570{
11571 u64 val;
11572 u64 sval = *psval;
11573
11574 if (entry->flags & CNTR_DISABLED) {
11575 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11576 return 0;
11577 }
11578
11579 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11580
11581 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11582
	/* If it's a synthetic counter there is more work we need to do */
11584 if (entry->flags & CNTR_SYNTH) {
11585 if (sval == CNTR_MAX) {
11586 /* No need to read already saturated */
11587 return CNTR_MAX;
11588 }
11589
11590 if (entry->flags & CNTR_32BIT) {
11591 /* 32bit counters can wrap multiple times */
11592 u64 upper = sval >> 32;
11593 u64 lower = (sval << 32) >> 32;
11594
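			/*
			 * Illustrative (hypothetical values): if sval is
			 * 0x200001000 and the h/w now reads 0x500, the 32-bit
			 * h/w counter wrapped, so the saved upper half is
			 * bumped to 3 and the result becomes 0x300000500.
			 */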
11595 if (lower > val) { /* hw wrapped */
11596 if (upper == CNTR_32BIT_MAX)
11597 val = CNTR_MAX;
11598 else
11599 upper++;
11600 }
11601
11602 if (val != CNTR_MAX)
11603 val = (upper << 32) | val;
11604
11605 } else {
11606 /* If we rolled we are saturated */
11607 if ((val < sval) || (val > CNTR_MAX))
11608 val = CNTR_MAX;
11609 }
11610 }
11611
11612 *psval = val;
11613
11614 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11615
11616 return val;
11617}
11618
11619static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11620 struct cntr_entry *entry,
11621 u64 *psval, void *context, int vl, u64 data)
11622{
11623 u64 val;
11624
11625 if (entry->flags & CNTR_DISABLED) {
11626 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11627 return 0;
11628 }
11629
11630 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11631
11632 if (entry->flags & CNTR_SYNTH) {
11633 *psval = data;
11634 if (entry->flags & CNTR_32BIT) {
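			/*
			 * The (data << 32) >> 32 below keeps only the low 32
			 * bits, since the underlying h/w counter is only 32
			 * bits wide.
			 */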
11635 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11636 (data << 32) >> 32);
11637 val = data; /* return the full 64bit value */
11638 } else {
11639 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11640 data);
11641 }
11642 } else {
11643 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11644 }
11645
11646 *psval = val;
11647
11648 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11649
11650 return val;
11651}
11652
11653u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11654{
11655 struct cntr_entry *entry;
11656 u64 *sval;
11657
11658 entry = &dev_cntrs[index];
11659 sval = dd->scntrs + entry->offset;
11660
11661 if (vl != CNTR_INVALID_VL)
11662 sval += vl;
11663
11664 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11665}
11666
11667u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11668{
11669 struct cntr_entry *entry;
11670 u64 *sval;
11671
11672 entry = &dev_cntrs[index];
11673 sval = dd->scntrs + entry->offset;
11674
11675 if (vl != CNTR_INVALID_VL)
11676 sval += vl;
11677
11678 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11679}
11680
11681u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11682{
11683 struct cntr_entry *entry;
11684 u64 *sval;
11685
11686 entry = &port_cntrs[index];
11687 sval = ppd->scntrs + entry->offset;
11688
11689 if (vl != CNTR_INVALID_VL)
11690 sval += vl;
11691
11692 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11693 (index <= C_RCV_HDR_OVF_LAST)) {
11694 /* We do not want to bother for disabled contexts */
11695 return 0;
11696 }
11697
11698 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11699}
11700
11701u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11702{
11703 struct cntr_entry *entry;
11704 u64 *sval;
11705
11706 entry = &port_cntrs[index];
11707 sval = ppd->scntrs + entry->offset;
11708
11709 if (vl != CNTR_INVALID_VL)
11710 sval += vl;
11711
11712 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11713 (index <= C_RCV_HDR_OVF_LAST)) {
11714 /* We do not want to bother for disabled contexts */
11715 return 0;
11716 }
11717
11718 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11719}
11720
11721static void update_synth_timer(unsigned long opaque)
11722{
11723 u64 cur_tx;
11724 u64 cur_rx;
11725 u64 total_flits;
11726 u8 update = 0;
11727 int i, j, vl;
11728 struct hfi1_pportdata *ppd;
11729 struct cntr_entry *entry;
11730
11731 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11732
11733 /*
	 * Rather than keep beating on the CSRs, pick a minimal set that we
	 * can check to watch for potential rollover. We can do this by
	 * looking at the number of flits sent/received. If the total flit
	 * count exceeds 32 bits then we have to iterate over all the
	 * counters and update.
11738 */
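	/*
	 * For example (hypothetical values): if the last tx/rx flit counts
	 * were 0x100/0x200 and the current reads are 0x100000100/0x200, the
	 * combined delta is 0x100000000, which exceeds CNTR_32BIT_MAX, so
	 * every counter is refreshed.
	 */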
11739 entry = &dev_cntrs[C_DC_RCV_FLITS];
11740 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11741
11742 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11743 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11744
11745 hfi1_cdbg(
11746 CNTR,
11747 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11748 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11749
11750 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11751 /*
11752 * May not be strictly necessary to update but it won't hurt and
11753 * simplifies the logic here.
11754 */
11755 update = 1;
11756 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11757 dd->unit);
11758 } else {
11759 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11760 hfi1_cdbg(CNTR,
11761 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11762 total_flits, (u64)CNTR_32BIT_MAX);
11763 if (total_flits >= CNTR_32BIT_MAX) {
11764 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11765 dd->unit);
11766 update = 1;
11767 }
11768 }
11769
11770 if (update) {
11771 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11772 for (i = 0; i < DEV_CNTR_LAST; i++) {
11773 entry = &dev_cntrs[i];
11774 if (entry->flags & CNTR_VL) {
11775 for (vl = 0; vl < C_VL_COUNT; vl++)
11776 read_dev_cntr(dd, i, vl);
11777 } else {
11778 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11779 }
11780 }
11781 ppd = (struct hfi1_pportdata *)(dd + 1);
11782 for (i = 0; i < dd->num_pports; i++, ppd++) {
11783 for (j = 0; j < PORT_CNTR_LAST; j++) {
11784 entry = &port_cntrs[j];
11785 if (entry->flags & CNTR_VL) {
11786 for (vl = 0; vl < C_VL_COUNT; vl++)
11787 read_port_cntr(ppd, j, vl);
11788 } else {
11789 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11790 }
11791 }
11792 }
11793
11794 /*
11795 * We want the value in the register. The goal is to keep track
11796 * of the number of "ticks" not the counter value. In other
11797 * words if the register rolls we want to notice it and go ahead
11798 * and force an update.
11799 */
11800 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11801 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11802 CNTR_MODE_R, 0);
11803
11804 entry = &dev_cntrs[C_DC_RCV_FLITS];
11805 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11806 CNTR_MODE_R, 0);
11807
11808 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11809 dd->unit, dd->last_tx, dd->last_rx);
11810
11811 } else {
11812 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11813 }
11814
	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11816}
11817
#define C_MAX_NAME 13 /* 12 chars + one for '\0' */
11819static int init_cntrs(struct hfi1_devdata *dd)
11820{
Dean Luickc024c552016-01-11 18:30:57 -050011821 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011822 size_t sz;
11823 char *p;
11824 char name[C_MAX_NAME];
11825 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011826 const char *bit_type_32 = ",32";
11827 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011828
	/* set up the stats timer; the timer is armed via mod_timer at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011830 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11831 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011832
11833 /***********************/
11834 /* per device counters */
11835 /***********************/
11836
	/* size names and determine how many we have */
11838 dd->ndevcntrs = 0;
11839 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011840
11841 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011842 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11843 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11844 continue;
11845 }
11846
11847 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050011848 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011849 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011850 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080011851 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011852 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011853 /* Add ",32" for 32-bit counters */
11854 if (dev_cntrs[i].flags & CNTR_32BIT)
11855 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011856 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011857 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011858 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011859 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050011860 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011861 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011862 snprintf(name, C_MAX_NAME, "%s%d",
11863 dev_cntrs[i].name, j);
11864 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011865 /* Add ",32" for 32-bit counters */
11866 if (dev_cntrs[i].flags & CNTR_32BIT)
11867 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011868 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011869 dd->ndevcntrs++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011870 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011871 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011872 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011873 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011874 /* Add ",32" for 32-bit counters */
11875 if (dev_cntrs[i].flags & CNTR_32BIT)
11876 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050011877 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011878 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011879 }
11880 }
11881
11882 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011883 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011884 if (!dd->cntrs)
11885 goto bail;
11886
Dean Luickc024c552016-01-11 18:30:57 -050011887 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011888 if (!dd->scntrs)
11889 goto bail;
11890
Mike Marciniszyn77241052015-07-30 15:17:43 -040011891 /* allocate space for the counter names */
11892 dd->cntrnameslen = sz;
11893 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11894 if (!dd->cntrnames)
11895 goto bail;
11896
11897 /* fill in the names */
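	/*
	 * Each name written below is newline-terminated; per-VL and
	 * per-SDMA-engine counters get the VL or engine number appended,
	 * and 32-bit counters get a ",32" suffix (e.g., a hypothetical
	 * "RcvDataVL0,32").
	 */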
Dean Luickc024c552016-01-11 18:30:57 -050011898 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011899 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11900 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011901 } else if (dev_cntrs[i].flags & CNTR_VL) {
11902 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011903 snprintf(name, C_MAX_NAME, "%s%d",
11904 dev_cntrs[i].name,
11905 vl_from_idx(j));
11906 memcpy(p, name, strlen(name));
11907 p += strlen(name);
11908
11909 /* Counter is 32 bits */
11910 if (dev_cntrs[i].flags & CNTR_32BIT) {
11911 memcpy(p, bit_type_32, bit_type_32_sz);
11912 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011913 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011914
Mike Marciniszyn77241052015-07-30 15:17:43 -040011915 *p++ = '\n';
11916 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011917 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11918 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011919 snprintf(name, C_MAX_NAME, "%s%d",
11920 dev_cntrs[i].name, j);
11921 memcpy(p, name, strlen(name));
11922 p += strlen(name);
11923
11924 /* Counter is 32 bits */
11925 if (dev_cntrs[i].flags & CNTR_32BIT) {
11926 memcpy(p, bit_type_32, bit_type_32_sz);
11927 p += bit_type_32_sz;
11928 }
11929
11930 *p++ = '\n';
11931 }
11932 } else {
11933 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
11934 p += strlen(dev_cntrs[i].name);
11935
11936 /* Counter is 32 bits */
11937 if (dev_cntrs[i].flags & CNTR_32BIT) {
11938 memcpy(p, bit_type_32, bit_type_32_sz);
11939 p += bit_type_32_sz;
11940 }
11941
11942 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040011943 }
11944 }
11945
11946 /*********************/
11947 /* per port counters */
11948 /*********************/
11949
11950 /*
11951 * Go through the counters for the overflows and disable the ones we
11952 * don't need. This varies based on platform so we need to do it
11953 * dynamically here.
11954 */
11955 rcv_ctxts = dd->num_rcv_contexts;
11956 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11957 i <= C_RCV_HDR_OVF_LAST; i++) {
11958 port_cntrs[i].flags |= CNTR_DISABLED;
11959 }
11960
	/* size port counter names and determine how many we have */
11962 sz = 0;
11963 dd->nportcntrs = 0;
11964 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011965 if (port_cntrs[i].flags & CNTR_DISABLED) {
11966 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11967 continue;
11968 }
11969
11970 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011971 port_cntrs[i].offset = dd->nportcntrs;
11972 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011973 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080011974 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011975 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011976 /* Add ",32" for 32-bit counters */
11977 if (port_cntrs[i].flags & CNTR_32BIT)
11978 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011979 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011980 dd->nportcntrs++;
11981 }
11982 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011983 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011984 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011985 /* Add ",32" for 32-bit counters */
11986 if (port_cntrs[i].flags & CNTR_32BIT)
11987 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011988 port_cntrs[i].offset = dd->nportcntrs;
11989 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011990 }
11991 }
11992
11993 /* allocate space for the counter names */
11994 dd->portcntrnameslen = sz;
11995 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11996 if (!dd->portcntrnames)
11997 goto bail;
11998
11999 /* fill in port cntr names */
12000 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12001 if (port_cntrs[i].flags & CNTR_DISABLED)
12002 continue;
12003
12004 if (port_cntrs[i].flags & CNTR_VL) {
12005 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012006 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012007 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012008 memcpy(p, name, strlen(name));
12009 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012010
12011 /* Counter is 32 bits */
12012 if (port_cntrs[i].flags & CNTR_32BIT) {
12013 memcpy(p, bit_type_32, bit_type_32_sz);
12014 p += bit_type_32_sz;
12015 }
12016
Mike Marciniszyn77241052015-07-30 15:17:43 -040012017 *p++ = '\n';
12018 }
12019 } else {
12020 memcpy(p, port_cntrs[i].name,
12021 strlen(port_cntrs[i].name));
12022 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012023
12024 /* Counter is 32 bits */
12025 if (port_cntrs[i].flags & CNTR_32BIT) {
12026 memcpy(p, bit_type_32, bit_type_32_sz);
12027 p += bit_type_32_sz;
12028 }
12029
Mike Marciniszyn77241052015-07-30 15:17:43 -040012030 *p++ = '\n';
12031 }
12032 }
12033
12034 /* allocate per port storage for counter values */
12035 ppd = (struct hfi1_pportdata *)(dd + 1);
12036 for (i = 0; i < dd->num_pports; i++, ppd++) {
12037 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12038 if (!ppd->cntrs)
12039 goto bail;
12040
12041 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12042 if (!ppd->scntrs)
12043 goto bail;
12044 }
12045
12046 /* CPU counters need to be allocated and zeroed */
12047 if (init_cpu_counters(dd))
12048 goto bail;
12049
12050 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12051 return 0;
12052bail:
12053 free_cntrs(dd);
12054 return -ENOMEM;
12055}
12056
Mike Marciniszyn77241052015-07-30 15:17:43 -040012057static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12058{
12059 switch (chip_lstate) {
12060 default:
12061 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012062 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12063 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012064 /* fall through */
12065 case LSTATE_DOWN:
12066 return IB_PORT_DOWN;
12067 case LSTATE_INIT:
12068 return IB_PORT_INIT;
12069 case LSTATE_ARMED:
12070 return IB_PORT_ARMED;
12071 case LSTATE_ACTIVE:
12072 return IB_PORT_ACTIVE;
12073 }
12074}
12075
12076u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12077{
12078 /* look at the HFI meta-states only */
12079 switch (chip_pstate & 0xf0) {
12080 default:
12081 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012082 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012083 /* fall through */
12084 case PLS_DISABLED:
12085 return IB_PORTPHYSSTATE_DISABLED;
12086 case PLS_OFFLINE:
12087 return OPA_PORTPHYSSTATE_OFFLINE;
12088 case PLS_POLLING:
12089 return IB_PORTPHYSSTATE_POLLING;
12090 case PLS_CONFIGPHY:
12091 return IB_PORTPHYSSTATE_TRAINING;
12092 case PLS_LINKUP:
12093 return IB_PORTPHYSSTATE_LINKUP;
12094 case PLS_PHYTEST:
12095 return IB_PORTPHYSSTATE_PHY_TEST;
12096 }
12097}
12098
12099/* return the OPA port logical state name */
12100const char *opa_lstate_name(u32 lstate)
12101{
12102 static const char * const port_logical_names[] = {
12103 "PORT_NOP",
12104 "PORT_DOWN",
12105 "PORT_INIT",
12106 "PORT_ARMED",
12107 "PORT_ACTIVE",
12108 "PORT_ACTIVE_DEFER",
12109 };
12110 if (lstate < ARRAY_SIZE(port_logical_names))
12111 return port_logical_names[lstate];
12112 return "unknown";
12113}
12114
12115/* return the OPA port physical state name */
12116const char *opa_pstate_name(u32 pstate)
12117{
12118 static const char * const port_physical_names[] = {
12119 "PHYS_NOP",
12120 "reserved1",
12121 "PHYS_POLL",
12122 "PHYS_DISABLED",
12123 "PHYS_TRAINING",
12124 "PHYS_LINKUP",
12125 "PHYS_LINK_ERR_RECOVER",
12126 "PHYS_PHY_TEST",
12127 "reserved8",
12128 "PHYS_OFFLINE",
12129 "PHYS_GANGED",
12130 "PHYS_TEST",
12131 };
12132 if (pstate < ARRAY_SIZE(port_physical_names))
12133 return port_physical_names[pstate];
12134 return "unknown";
12135}
12136
12137/*
12138 * Read the hardware link state and set the driver's cached value of it.
12139 * Return the (new) current value.
12140 */
12141u32 get_logical_state(struct hfi1_pportdata *ppd)
12142{
12143 u32 new_state;
12144
12145 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12146 if (new_state != ppd->lstate) {
12147 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012148 opa_lstate_name(new_state), new_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012149 ppd->lstate = new_state;
12150 }
12151 /*
12152 * Set port status flags in the page mapped into userspace
12153 * memory. Do it here to ensure a reliable state - this is
12154 * the only function called by all state handling code.
	 * Always set the flags because the cached value might have been
	 * changed explicitly outside of this function.
12158 */
12159 if (ppd->statusp) {
12160 switch (ppd->lstate) {
12161 case IB_PORT_DOWN:
12162 case IB_PORT_INIT:
12163 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12164 HFI1_STATUS_IB_READY);
12165 break;
12166 case IB_PORT_ARMED:
12167 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12168 break;
12169 case IB_PORT_ACTIVE:
12170 *ppd->statusp |= HFI1_STATUS_IB_READY;
12171 break;
12172 }
12173 }
12174 return ppd->lstate;
12175}
12176
12177/**
12178 * wait_logical_linkstate - wait for an IB link state change to occur
12179 * @ppd: port device
12180 * @state: the state to wait for
12181 * @msecs: the number of milliseconds to wait
12182 *
12183 * Wait up to msecs milliseconds for IB link state change to occur.
12184 * For now, take the easy polling route.
12185 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12186 */
12187static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12188 int msecs)
12189{
12190 unsigned long timeout;
12191
12192 timeout = jiffies + msecs_to_jiffies(msecs);
12193 while (1) {
12194 if (get_logical_state(ppd) == state)
12195 return 0;
12196 if (time_after(jiffies, timeout))
12197 break;
12198 msleep(20);
12199 }
12200 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12201
12202 return -ETIMEDOUT;
12203}
12204
12205u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12206{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012207 u32 pstate;
12208 u32 ib_pstate;
12209
12210 pstate = read_physical_state(ppd->dd);
12211 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012212 if (ppd->last_pstate != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012213 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012214 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12215 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12216 pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012217 ppd->last_pstate = ib_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012218 }
12219 return ib_pstate;
12220}
12221
12222/*
12223 * Read/modify/write ASIC_QSFP register bits as selected by mask
12224 * data: 0 or 1 in the positions depending on what needs to be written
12225 * dir: 0 for read, 1 for write
12226 * mask: select by setting
12227 * I2CCLK (bit 0)
12228 * I2CDATA (bit 1)
12229 */
12230u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12231 u32 mask)
12232{
12233 u64 qsfp_oe, target_oe;
12234
12235 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12236 if (mask) {
12237 /* We are writing register bits, so lock access */
12238 dir &= mask;
12239 data &= mask;
12240
12241 qsfp_oe = read_csr(dd, target_oe);
12242 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12243 write_csr(dd, target_oe, qsfp_oe);
12244 }
	/* We are exclusively reading bits here, but it is unlikely
	 * we'll get valid data when we set the direction of the pin
	 * in the same call, so a read should call this function
	 * again to get valid data.
12249 */
12250 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12251}
12252
12253#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12254(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12255
12256#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12257(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12258
12259int hfi1_init_ctxt(struct send_context *sc)
12260{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012261 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012262 struct hfi1_devdata *dd = sc->dd;
12263 u64 reg;
12264 u8 set = (sc->type == SC_USER ?
12265 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12266 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12267 reg = read_kctxt_csr(dd, sc->hw_context,
12268 SEND_CTXT_CHECK_ENABLE);
12269 if (set)
12270 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12271 else
12272 SET_STATIC_RATE_CONTROL_SMASK(reg);
12273 write_kctxt_csr(dd, sc->hw_context,
12274 SEND_CTXT_CHECK_ENABLE, reg);
12275 }
12276 return 0;
12277}
12278
12279int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12280{
12281 int ret = 0;
12282 u64 reg;
12283
12284 if (dd->icode != ICODE_RTL_SILICON) {
12285 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12286 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12287 __func__);
12288 return -EINVAL;
12289 }
12290 reg = read_csr(dd, ASIC_STS_THERM);
12291 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12292 ASIC_STS_THERM_CURR_TEMP_MASK);
12293 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12294 ASIC_STS_THERM_LO_TEMP_MASK);
12295 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12296 ASIC_STS_THERM_HI_TEMP_MASK);
12297 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12298 ASIC_STS_THERM_CRIT_TEMP_MASK);
12299 /* triggers is a 3-bit value - 1 bit per trigger. */
12300 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12301
12302 return ret;
12303}
12304
12305/* ========================================================================= */
12306
12307/*
12308 * Enable/disable chip from delivering interrupts.
12309 */
12310void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12311{
12312 int i;
12313
12314 /*
12315 * In HFI, the mask needs to be 1 to allow interrupts.
12316 */
12317 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012318 /* enable all interrupts */
12319 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012320 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012321
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012322 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012323 } else {
12324 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012325 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012326 }
12327}
12328
12329/*
12330 * Clear all interrupt sources on the chip.
12331 */
12332static void clear_all_interrupts(struct hfi1_devdata *dd)
12333{
12334 int i;
12335
12336 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012337 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012338
12339 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12340 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12341 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12342 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12343 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12344 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12345 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12346 for (i = 0; i < dd->chip_send_contexts; i++)
12347 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12348 for (i = 0; i < dd->chip_sdma_engines; i++)
12349 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12350
12351 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12352 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12353 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12354}
12355
12356/* Move to pcie.c? */
12357static void disable_intx(struct pci_dev *pdev)
12358{
12359 pci_intx(pdev, 0);
12360}
12361
12362static void clean_up_interrupts(struct hfi1_devdata *dd)
12363{
12364 int i;
12365
12366 /* remove irqs - must happen before disabling/turning off */
12367 if (dd->num_msix_entries) {
12368 /* MSI-X */
12369 struct hfi1_msix_entry *me = dd->msix_entries;
12370
12371 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012372 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012373 continue;
12374 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012375 free_irq(me->msix.vector, me->arg);
12376 }
12377 } else {
12378 /* INTx */
12379 if (dd->requested_intx_irq) {
12380 free_irq(dd->pcidev->irq, dd);
12381 dd->requested_intx_irq = 0;
12382 }
12383 }
12384
12385 /* turn off interrupts */
12386 if (dd->num_msix_entries) {
12387 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012388 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012389 } else {
12390 /* INTx */
12391 disable_intx(dd->pcidev);
12392 }
12393
12394 /* clean structures */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012395 kfree(dd->msix_entries);
12396 dd->msix_entries = NULL;
12397 dd->num_msix_entries = 0;
12398}
12399
12400/*
12401 * Remap the interrupt source from the general handler to the given MSI-X
12402 * interrupt.
12403 */
12404static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12405{
12406 u64 reg;
12407 int m, n;
12408
12409 /* clear from the handled mask of the general interrupt */
12410 m = isrc / 64;
12411 n = isrc % 64;
12412 dd->gi_mask[m] &= ~((u64)1 << n);
12413
12414 /* direct the chip source to the given MSI-X interrupt */
12415 m = isrc / 8;
12416 n = isrc % 8;
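	/*
	 * For example, a hypothetical isrc of 10 selects CCE_INT_MAP
	 * register 1 (m = 1) and byte lane 2 (n = 2) within it.
	 */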
Jubin John8638b772016-02-14 20:19:24 -080012417 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12418 reg &= ~((u64)0xff << (8 * n));
12419 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12420 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012421}
12422
12423static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12424 int engine, int msix_intr)
12425{
12426 /*
	 * SDMA engine interrupt sources are grouped by type, rather than
	 * by engine. Per-engine interrupts are as follows:
12429 * SDMA
12430 * SDMAProgress
12431 * SDMAIdle
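	 *
	 * A hypothetical engine 2 therefore maps its three sources at
	 * IS_SDMA_START + 2, IS_SDMA_START + TXE_NUM_SDMA_ENGINES + 2, and
	 * IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + 2.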
12432 */
Jubin John8638b772016-02-14 20:19:24 -080012433 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012434 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012435 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012436 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012437 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012438 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012439}
12440
Mike Marciniszyn77241052015-07-30 15:17:43 -040012441static int request_intx_irq(struct hfi1_devdata *dd)
12442{
12443 int ret;
12444
Jubin John98050712015-11-16 21:59:27 -050012445 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12446 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012447 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012448 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012449 if (ret)
12450 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012451 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012452 else
12453 dd->requested_intx_irq = 1;
12454 return ret;
12455}
12456
12457static int request_msix_irqs(struct hfi1_devdata *dd)
12458{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012459 int first_general, last_general;
12460 int first_sdma, last_sdma;
12461 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012462 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012463
12464 /* calculate the ranges we are going to use */
12465 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012466 last_general = first_general + 1;
12467 first_sdma = last_general;
12468 last_sdma = first_sdma + dd->num_sdma;
12469 first_rx = last_sdma;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012470 last_rx = first_rx + dd->n_krcv_queues;
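	/*
	 * Resulting MSI-X vector layout: vector 0 is the general "slow
	 * path" interrupt, vectors 1..num_sdma are the SDMA engines, and
	 * the remaining vectors are the kernel receive contexts.
	 */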
12471
12472 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012473 * Sanity check - the code expects all SDMA chip source
12474 * interrupts to be in the same CSR, starting at bit 0. Verify
12475 * that this is true by checking the bit location of the start.
12476 */
12477 BUILD_BUG_ON(IS_SDMA_START % 64);
12478
12479 for (i = 0; i < dd->num_msix_entries; i++) {
12480 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12481 const char *err_info;
12482 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012483 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012484 void *arg;
12485 int idx;
12486 struct hfi1_ctxtdata *rcd = NULL;
12487 struct sdma_engine *sde = NULL;
12488
12489 /* obtain the arguments to request_irq */
12490 if (first_general <= i && i < last_general) {
12491 idx = i - first_general;
12492 handler = general_interrupt;
12493 arg = dd;
12494 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012495 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012496 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012497 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012498 } else if (first_sdma <= i && i < last_sdma) {
12499 idx = i - first_sdma;
12500 sde = &dd->per_sdma[idx];
12501 handler = sdma_interrupt;
12502 arg = sde;
12503 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012504 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012505 err_info = "sdma";
12506 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012507 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012508 } else if (first_rx <= i && i < last_rx) {
12509 idx = i - first_rx;
12510 rcd = dd->rcd[idx];
12511 /* no interrupt if no rcd */
12512 if (!rcd)
12513 continue;
12514 /*
12515 * Set the interrupt register and mask for this
12516 * context's interrupt.
12517 */
Jubin John8638b772016-02-14 20:19:24 -080012518 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012519 rcd->imask = ((u64)1) <<
Jubin John8638b772016-02-14 20:19:24 -080012520 ((IS_RCVAVAIL_START + idx) % 64);
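			/*
			 * ireg/imask locate this context's RcvAvail source
			 * within the 64-bit CCE interrupt CSRs: ireg is the
			 * register index, imask the bit within it.
			 */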
Mike Marciniszyn77241052015-07-30 15:17:43 -040012521 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012522 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012523 arg = rcd;
12524 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012525 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012526 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012527 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012528 me->type = IRQ_RCVCTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012529 } else {
12530 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080012531 * ignore it
12532 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012533 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012534 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012535 continue;
12536 }
12537 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080012538 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012539 continue;
12540 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080012541 me->name[sizeof(me->name) - 1] = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012542
Dean Luickf4f30031c2015-10-26 10:28:44 -040012543 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080012544 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012545 if (ret) {
12546 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012547 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12548 err_info, me->msix.vector, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012549 return ret;
12550 }
12551 /*
12552 * assign arg after request_irq call, so it will be
12553 * cleaned up
12554 */
12555 me->arg = arg;
12556
Mitko Haralanov957558c2016-02-03 14:33:40 -080012557 ret = hfi1_get_irq_affinity(dd, me);
12558 if (ret)
12559 dd_dev_err(dd,
12560 "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012561 }
12562
Mike Marciniszyn77241052015-07-30 15:17:43 -040012563 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012564}
12565
12566/*
12567 * Set the general handler to accept all interrupts, remap all
12568 * chip interrupts back to MSI-X 0.
12569 */
12570static void reset_interrupts(struct hfi1_devdata *dd)
12571{
12572 int i;
12573
12574 /* all interrupts handled by the general handler */
12575 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12576 dd->gi_mask[i] = ~(u64)0;
12577
12578 /* all chip interrupts map to MSI-X 0 */
12579 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012580 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012581}
12582
12583static int set_up_interrupts(struct hfi1_devdata *dd)
12584{
12585 struct hfi1_msix_entry *entries;
12586 u32 total, request;
12587 int i, ret;
12588 int single_interrupt = 0; /* we expect to have all the interrupts */
12589
12590 /*
12591 * Interrupt count:
12592 * 1 general, "slow path" interrupt (includes the SDMA engines
12593 * slow source, SDMACleanupDone)
12594 * N interrupts - one per used SDMA engine
12595 * M interrupts - one per kernel receive context
12596 */
12597 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12598
12599 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12600 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012601 ret = -ENOMEM;
12602 goto fail;
12603 }
12604 /* 1-1 MSI-X entry assignment */
12605 for (i = 0; i < total; i++)
12606 entries[i].msix.entry = i;
12607
12608 /* ask for MSI-X interrupts */
12609 request = total;
12610 request_msix(dd, &request, entries);
12611
12612 if (request == 0) {
12613 /* using INTx */
12614 /* dd->num_msix_entries already zero */
12615 kfree(entries);
12616 single_interrupt = 1;
12617 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12618 } else {
12619 /* using MSI-X */
12620 dd->num_msix_entries = request;
12621 dd->msix_entries = entries;
12622
12623 if (request != total) {
12624 /* using MSI-X, with reduced interrupts */
12625 dd_dev_err(
12626 dd,
12627 "cannot handle reduced interrupt case, want %u, got %u\n",
12628 total, request);
12629 ret = -EINVAL;
12630 goto fail;
12631 }
12632 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12633 }
12634
12635 /* mask all interrupts */
12636 set_intr_state(dd, 0);
12637 /* clear all pending interrupts */
12638 clear_all_interrupts(dd);
12639
12640 /* reset general handler mask, chip MSI-X mappings */
12641 reset_interrupts(dd);
12642
12643 if (single_interrupt)
12644 ret = request_intx_irq(dd);
12645 else
12646 ret = request_msix_irqs(dd);
12647 if (ret)
12648 goto fail;
12649
12650 return 0;
12651
12652fail:
12653 clean_up_interrupts(dd);
12654 return ret;
12655}
12656
12657/*
12658 * Set up context values in dd. Sets:
12659 *
12660 * num_rcv_contexts - number of contexts being used
12661 * n_krcv_queues - number of kernel contexts
12662 * first_user_ctxt - first non-kernel context in array of contexts
12663 * freectxts - number of free user contexts
12664 * num_send_contexts - number of PIO send contexts being used
12665 */
12666static int set_up_context_variables(struct hfi1_devdata *dd)
12667{
12668 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012669 int total_contexts;
12670 int ret;
12671 unsigned ngroups;
12672
12673 /*
12674 * Kernel contexts (to be fixed later):
12675 * - min of 2 or 1 context/numa
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012676 * - Context 0 - control context (VL15/multicast/error)
12677 * - Context 1 - default context
Mike Marciniszyn77241052015-07-30 15:17:43 -040012678 */
12679 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012680 /*
12681 * Don't count context 0 in n_krcvqs since
12682 * it isn't used for normal verbs traffic.
12683 *
12684 * krcvqs will reflect number of kernel
12685 * receive contexts above 0.
12686 */
12687 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012688 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012689 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012690 num_kernel_contexts =
12691 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12692 /*
12693 * Every kernel receive context needs an ACK send context.
12694 * one send context is allocated for each VL{0-7} and VL15
12695 */
12696 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12697 dd_dev_err(dd,
12698 "Reducing # kernel rcv contexts to: %d, from %d\n",
12699 (int)(dd->chip_send_contexts - num_vls - 1),
12700 (int)num_kernel_contexts);
12701 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12702 }
12703 /*
12704 * User contexts: (to be fixed later)
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012705 * - default to 1 user context per CPU if num_user_contexts is
12706 * negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012707 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012708 if (num_user_contexts < 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012709 num_user_contexts = num_online_cpus();
12710
12711 total_contexts = num_kernel_contexts + num_user_contexts;
12712
12713 /*
12714 * Adjust the counts given a global max.
12715 */
12716 if (total_contexts > dd->chip_rcv_contexts) {
12717 dd_dev_err(dd,
12718 "Reducing # user receive contexts to: %d, from %d\n",
12719 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12720 (int)num_user_contexts);
12721 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12722 /* recalculate */
12723 total_contexts = num_kernel_contexts + num_user_contexts;
12724 }
12725
12726 /* the first N are kernel contexts, the rest are user contexts */
12727 dd->num_rcv_contexts = total_contexts;
12728 dd->n_krcv_queues = num_kernel_contexts;
12729 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080012730 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012731 dd->freectxts = num_user_contexts;
12732 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012733 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12734 (int)dd->chip_rcv_contexts,
12735 (int)dd->num_rcv_contexts,
12736 (int)dd->n_krcv_queues,
12737 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012738
12739 /*
12740 * Receive array allocation:
12741 * All RcvArray entries are divided into groups of 8. This
12742 * is required by the hardware and will speed up writes to
12743 * consecutive entries by using write-combining of the entire
12744 * cacheline.
12745 *
12746 * The number of groups is evenly divided among all contexts;
12747 * any leftover groups will be given to the first N user
12748 * contexts.
12749 */
12750 dd->rcv_entries.group_size = RCV_INCREMENT;
12751 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12752 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12753 dd->rcv_entries.nctxt_extra = ngroups -
12754 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
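	/*
	 * Illustrative numbers only: with 1000 groups and 24 contexts,
	 * each context gets 41 groups and the 16 left over go one apiece
	 * to the first 16 user contexts.
	 */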
12755 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12756 dd->rcv_entries.ngroups,
12757 dd->rcv_entries.nctxt_extra);
12758 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12759 MAX_EAGER_ENTRIES * 2) {
12760 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12761 dd->rcv_entries.group_size;
12762 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012763 "RcvArray group count too high, change to %u\n",
12764 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012765 dd->rcv_entries.nctxt_extra = 0;
12766 }
12767 /*
12768 * PIO send contexts
12769 */
12770 ret = init_sc_pools_and_sizes(dd);
12771 if (ret >= 0) { /* success */
12772 dd->num_send_contexts = ret;
12773 dd_dev_info(
12774 dd,
12775 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12776 dd->chip_send_contexts,
12777 dd->num_send_contexts,
12778 dd->sc_sizes[SC_KERNEL].count,
12779 dd->sc_sizes[SC_ACK].count,
12780 dd->sc_sizes[SC_USER].count);
12781 ret = 0; /* success */
12782 }
12783
12784 return ret;
12785}
12786
12787/*
12788 * Set the device/port partition key table. The MAD code
12789 * will ensure that, at least, the partial management
12790 * partition key is present in the table.
12791 */
12792static void set_partition_keys(struct hfi1_pportdata *ppd)
12793{
12794 struct hfi1_devdata *dd = ppd->dd;
12795 u64 reg = 0;
12796 int i;
12797
12798 dd_dev_info(dd, "Setting partition keys\n");
12799 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12800 reg |= (ppd->pkeys[i] &
12801 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12802 ((i % 4) *
12803 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12804 /* Each register holds 4 PKey values. */
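		/*
		 * e.g. pkeys[0..3] land in RCV_PARTITION_KEY and
		 * pkeys[4..7] in RCV_PARTITION_KEY + 8.
		 */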
12805 if ((i % 4) == 3) {
12806 write_csr(dd, RCV_PARTITION_KEY +
12807 ((i - 3) * 2), reg);
12808 reg = 0;
12809 }
12810 }
12811
12812 /* Always enable HW pkeys check when pkeys table is set */
12813 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12814}
12815
12816/*
12817 * These CSRs and memories are uninitialized on reset and must be
12818 * written before reading to set the ECC/parity bits.
12819 *
12820 * NOTE: All user context CSRs that are not mmaped write-only
12821 * (e.g. the TID flows) must be initialized even if the driver never
12822 * reads them.
12823 */
12824static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12825{
12826 int i, j;
12827
12828 /* CceIntMap */
12829 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012830 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012831
12832 /* SendCtxtCreditReturnAddr */
12833 for (i = 0; i < dd->chip_send_contexts; i++)
12834 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12835
12836 /* PIO Send buffers */
12837 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080012838 /*
12839 * These are not normally read, and (presently) have no method
12840 * to be read, so are not pre-initialized
12841 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012842
12843 /* RcvHdrAddr */
12844 /* RcvHdrTailAddr */
12845 /* RcvTidFlowTable */
12846 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12847 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12848 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12849 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080012850 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012851 }
12852
12853 /* RcvArray */
12854 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080012855 write_csr(dd, RCV_ARRAY + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080012856 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012857
12858 /* RcvQPMapTable */
12859 for (i = 0; i < 32; i++)
12860 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12861}
12862
12863/*
12864 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12865 */
12866static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12867 u64 ctrl_bits)
12868{
12869 unsigned long timeout;
12870 u64 reg;
12871
12872 /* is the condition present? */
12873 reg = read_csr(dd, CCE_STATUS);
12874 if ((reg & status_bits) == 0)
12875 return;
12876
12877 /* clear the condition */
12878 write_csr(dd, CCE_CTRL, ctrl_bits);
12879
12880 /* wait for the condition to clear */
12881 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12882 while (1) {
12883 reg = read_csr(dd, CCE_STATUS);
12884 if ((reg & status_bits) == 0)
12885 return;
12886 if (time_after(jiffies, timeout)) {
12887 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012888 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12889 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012890 return;
12891 }
12892 udelay(1);
12893 }
12894}
12895
12896/* set CCE CSRs to chip reset defaults */
12897static void reset_cce_csrs(struct hfi1_devdata *dd)
12898{
12899 int i;
12900
12901 /* CCE_REVISION read-only */
12902 /* CCE_REVISION2 read-only */
12903 /* CCE_CTRL - bits clear automatically */
12904 /* CCE_STATUS read-only, use CceCtrl to clear */
12905 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12906 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12907 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12908 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12909 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12910 /* CCE_ERR_STATUS read-only */
12911 write_csr(dd, CCE_ERR_MASK, 0);
12912 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12913 /* CCE_ERR_FORCE leave alone */
12914 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12915 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12916 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12917 /* CCE_PCIE_CTRL leave alone */
12918 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12919 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12920 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080012921 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012922 }
12923 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12924 /* CCE_MSIX_PBA read-only */
12925 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12926 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12927 }
12928 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12929 write_csr(dd, CCE_INT_MAP, 0);
12930 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12931 /* CCE_INT_STATUS read-only */
12932 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12933 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12934 /* CCE_INT_FORCE leave alone */
12935 /* CCE_INT_BLOCKED read-only */
12936 }
12937 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12938 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12939}
12940
12941/* set ASIC CSRs to chip reset defaults */
12942static void reset_asic_csrs(struct hfi1_devdata *dd)
12943{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012944 int i;
12945
12946 /*
12947 * If the HFIs are shared between separate nodes or VMs,
12948 * then more will need to be done here. One idea is a module
12949 * parameter that returns early, letting the first power-on or
12950 * a known first load do the reset and blocking all others.
12951 */
12952
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012953 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12954 return;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012955
12956 if (dd->icode != ICODE_FPGA_EMULATION) {
12957 /* emulation does not have an SBus - leave these alone */
12958 /*
12959 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12960 * Notes:
12961 * o The reset is not zero if aimed at the core. See the
12962 * SBus documentation for details.
12963 * o If the SBus firmware has been updated (e.g. by the BIOS),
12964 * will the reset revert that?
12965 */
12966 /* ASIC_CFG_SBUS_REQUEST leave alone */
12967 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12968 }
12969 /* ASIC_SBUS_RESULT read-only */
12970 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12971 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12972 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12973 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012974
12975 /* We might want to retain this state across FLR if we ever use it */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012976 write_csr(dd, ASIC_CFG_DRV_STR, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012977
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050012978 /* ASIC_CFG_THERM_POLL_EN leave alone */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012979 /* ASIC_STS_THERM read-only */
12980 /* ASIC_CFG_RESET leave alone */
12981
12982 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12983 /* ASIC_PCIE_SD_HOST_STATUS read-only */
12984 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12985 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12986 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12987 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12988 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12989 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12990 for (i = 0; i < 16; i++)
12991 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12992
12993 /* ASIC_GPIO_IN read-only */
12994 write_csr(dd, ASIC_GPIO_OE, 0);
12995 write_csr(dd, ASIC_GPIO_INVERT, 0);
12996 write_csr(dd, ASIC_GPIO_OUT, 0);
12997 write_csr(dd, ASIC_GPIO_MASK, 0);
12998 /* ASIC_GPIO_STATUS read-only */
12999 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
13000 /* ASIC_GPIO_FORCE leave alone */
13001
13002 /* ASIC_QSFP1_IN read-only */
13003 write_csr(dd, ASIC_QSFP1_OE, 0);
13004 write_csr(dd, ASIC_QSFP1_INVERT, 0);
13005 write_csr(dd, ASIC_QSFP1_OUT, 0);
13006 write_csr(dd, ASIC_QSFP1_MASK, 0);
13007 /* ASIC_QSFP1_STATUS read-only */
13008 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
13009 /* ASIC_QSFP1_FORCE leave alone */
13010
13011 /* ASIC_QSFP2_IN read-only */
13012 write_csr(dd, ASIC_QSFP2_OE, 0);
13013 write_csr(dd, ASIC_QSFP2_INVERT, 0);
13014 write_csr(dd, ASIC_QSFP2_OUT, 0);
13015 write_csr(dd, ASIC_QSFP2_MASK, 0);
13016 /* ASIC_QSFP2_STATUS read-only */
13017 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
13018 /* ASIC_QSFP2_FORCE leave alone */
13019
13020 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
13021 /* this also writes a NOP command, clearing paging mode */
13022 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
13023 write_csr(dd, ASIC_EEP_DATA, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013024}
13025
13026/* set MISC CSRs to chip reset defaults */
13027static void reset_misc_csrs(struct hfi1_devdata *dd)
13028{
13029 int i;
13030
13031 for (i = 0; i < 32; i++) {
13032 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13033 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13034 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13035 }
Jubin John4d114fd2016-02-14 20:21:43 -080013036 /*
13037 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13038 * only be written in 128-byte chunks
13039 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013040 /* init RSA engine to clear lingering errors */
13041 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13042 write_csr(dd, MISC_CFG_RSA_MU, 0);
13043 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13044 /* MISC_STS_8051_DIGEST read-only */
13045 /* MISC_STS_SBM_DIGEST read-only */
13046 /* MISC_STS_PCIE_DIGEST read-only */
13047 /* MISC_STS_FAB_DIGEST read-only */
13048 /* MISC_ERR_STATUS read-only */
13049 write_csr(dd, MISC_ERR_MASK, 0);
13050 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13051 /* MISC_ERR_FORCE leave alone */
13052}
13053
13054/* set TXE CSRs to chip reset defaults */
13055static void reset_txe_csrs(struct hfi1_devdata *dd)
13056{
13057 int i;
13058
13059 /*
13060 * TXE Kernel CSRs
13061 */
13062 write_csr(dd, SEND_CTRL, 0);
13063 __cm_reset(dd, 0); /* reset CM internal state */
13064 /* SEND_CONTEXTS read-only */
13065 /* SEND_DMA_ENGINES read-only */
13066 /* SEND_PIO_MEM_SIZE read-only */
13067 /* SEND_DMA_MEM_SIZE read-only */
13068 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13069 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13070 /* SEND_PIO_ERR_STATUS read-only */
13071 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13072 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13073 /* SEND_PIO_ERR_FORCE leave alone */
13074 /* SEND_DMA_ERR_STATUS read-only */
13075 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13076 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13077 /* SEND_DMA_ERR_FORCE leave alone */
13078 /* SEND_EGRESS_ERR_STATUS read-only */
13079 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13080 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13081 /* SEND_EGRESS_ERR_FORCE leave alone */
13082 write_csr(dd, SEND_BTH_QP, 0);
13083 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13084 write_csr(dd, SEND_SC2VLT0, 0);
13085 write_csr(dd, SEND_SC2VLT1, 0);
13086 write_csr(dd, SEND_SC2VLT2, 0);
13087 write_csr(dd, SEND_SC2VLT3, 0);
13088 write_csr(dd, SEND_LEN_CHECK0, 0);
13089 write_csr(dd, SEND_LEN_CHECK1, 0);
13090 /* SEND_ERR_STATUS read-only */
13091 write_csr(dd, SEND_ERR_MASK, 0);
13092 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13093 /* SEND_ERR_FORCE read-only */
13094 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013095 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013096 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013097 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13098 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13099 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013100 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013101 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013102 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013103 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013104 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013105 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013106 /* SEND_CM_CREDIT_USED_STATUS read-only */
13107 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13108 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13109 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13110 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13111 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13112 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013113 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013114 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13115 /* SEND_CM_CREDIT_USED_VL read-only */
13116 /* SEND_CM_CREDIT_USED_VL15 read-only */
13117 /* SEND_EGRESS_CTXT_STATUS read-only */
13118 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13119 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13120 /* SEND_EGRESS_ERR_INFO read-only */
13121 /* SEND_EGRESS_ERR_SOURCE read-only */
13122
13123 /*
13124 * TXE Per-Context CSRs
13125 */
13126 for (i = 0; i < dd->chip_send_contexts; i++) {
13127 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13128 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13129 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13130 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13131 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13132 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13133 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13134 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13135 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13136 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13137 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13138 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13139 }
13140
13141 /*
13142 * TXE Per-SDMA CSRs
13143 */
13144 for (i = 0; i < dd->chip_sdma_engines; i++) {
13145 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13146 /* SEND_DMA_STATUS read-only */
13147 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13148 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13149 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13150 /* SEND_DMA_HEAD read-only */
13151 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13152 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13153 /* SEND_DMA_IDLE_CNT read-only */
13154 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13155 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13156 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13157 /* SEND_DMA_ENG_ERR_STATUS read-only */
13158 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13159 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13160 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13161 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13162 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13163 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13164 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13165 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13166 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13167 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13168 }
13169}
13170
13171/*
13172 * Expect on entry:
13173 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13174 */
13175static void init_rbufs(struct hfi1_devdata *dd)
13176{
13177 u64 reg;
13178 int count;
13179
13180 /*
13181 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13182 * clear.
13183 */
13184 count = 0;
13185 while (1) {
13186 reg = read_csr(dd, RCV_STATUS);
13187 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13188 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13189 break;
13190 /*
13191 * Give up after 1ms - maximum wait time.
13192 *
13193 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13194 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13195 * 148 KB / (66% * 250MB/s) = 920us
13196 */
13197 if (count++ > 500) {
13198 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013199 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13200 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013201 break;
13202 }
13203 udelay(2); /* do not busy-wait the CSR */
13204 }
13205
13206 /* start the init - expect RcvCtrl to be 0 */
13207 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13208
13209 /*
13210 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13211 * period after the write before RcvStatus.RxRbufInitDone is valid.
13212 * The delay in the first run through the loop below is sufficient and
13213 * required before the first read of RcvStatus.RxRbufInitDone.
13214 */
13215 read_csr(dd, RCV_CTRL);
13216
13217 /* wait for the init to finish */
13218 count = 0;
13219 while (1) {
13220 /* delay is required first time through - see above */
13221 udelay(2); /* do not busy-wait the CSR */
13222 reg = read_csr(dd, RCV_STATUS);
13223 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13224 break;
13225
13226 /* give up after 100us - slowest possible at 33MHz is 73us */
13227 if (count++ > 50) {
13228 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013229 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13230 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013231 break;
13232 }
13233 }
13234}
13235
13236/* set RXE CSRs to chip reset defaults */
13237static void reset_rxe_csrs(struct hfi1_devdata *dd)
13238{
13239 int i, j;
13240
13241 /*
13242 * RXE Kernel CSRs
13243 */
13244 write_csr(dd, RCV_CTRL, 0);
13245 init_rbufs(dd);
13246 /* RCV_STATUS read-only */
13247 /* RCV_CONTEXTS read-only */
13248 /* RCV_ARRAY_CNT read-only */
13249 /* RCV_BUF_SIZE read-only */
13250 write_csr(dd, RCV_BTH_QP, 0);
13251 write_csr(dd, RCV_MULTICAST, 0);
13252 write_csr(dd, RCV_BYPASS, 0);
13253 write_csr(dd, RCV_VL15, 0);
13254 /* this is a clear-down */
13255 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013256 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013257 /* RCV_ERR_STATUS read-only */
13258 write_csr(dd, RCV_ERR_MASK, 0);
13259 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13260 /* RCV_ERR_FORCE leave alone */
13261 for (i = 0; i < 32; i++)
13262 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13263 for (i = 0; i < 4; i++)
13264 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13265 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13266 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13267 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13268 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13269 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13270 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13271 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13272 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13273 }
13274 for (i = 0; i < 32; i++)
13275 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13276
13277 /*
13278 * RXE Kernel and User Per-Context CSRs
13279 */
13280 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13281 /* kernel */
13282 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13283 /* RCV_CTXT_STATUS read-only */
13284 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13285 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13286 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13287 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13288 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13289 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13290 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13291 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13292 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13293 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13294
13295 /* user */
13296 /* RCV_HDR_TAIL read-only */
13297 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13298 /* RCV_EGR_INDEX_TAIL read-only */
13299 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13300 /* RCV_EGR_OFFSET_TAIL read-only */
13301 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013302 write_uctxt_csr(dd, i,
13303 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013304 }
13305 }
13306}
13307
13308/*
13309 * Set sc2vl tables.
13310 *
13311 * They power on to zeros, so to avoid send context errors
13312 * they need to be set:
13313 *
13314 * SC 0-7 -> VL 0-7 (respectively)
13315 * SC 15 -> VL 15
13316 * otherwise
13317 * -> VL 0
13318 */
13319static void init_sc2vl_tables(struct hfi1_devdata *dd)
13320{
13321 int i;
13322 /* init per architecture spec, constrained by hardware capability */
13323
13324 /* HFI maps sent packets */
13325 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13326 0,
13327 0, 0, 1, 1,
13328 2, 2, 3, 3,
13329 4, 4, 5, 5,
13330 6, 6, 7, 7));
13331 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13332 1,
13333 8, 0, 9, 0,
13334 10, 0, 11, 0,
13335 12, 0, 13, 0,
13336 14, 0, 15, 15));
13337 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13338 2,
13339 16, 0, 17, 0,
13340 18, 0, 19, 0,
13341 20, 0, 21, 0,
13342 22, 0, 23, 0));
13343 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13344 3,
13345 24, 0, 25, 0,
13346 26, 0, 27, 0,
13347 28, 0, 29, 0,
13348 30, 0, 31, 0));
13349
13350 /* DC maps received packets */
13351 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13352 15_0,
13353 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13354 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13355 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13356 31_16,
13357 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13358 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13359
13360 /* initialize the cached sc2vl values consistently with h/w */
13361 for (i = 0; i < 32; i++) {
13362 if (i < 8 || i == 15)
13363 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13364 else
13365 *((u8 *)(dd->sc2vl) + i) = 0;
13366 }
13367}
13368
13369/*
13370 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13371 * depend on the chip going through a power-on reset - a driver may be loaded
13372 * and unloaded many times.
13373 *
13374 * Do not write any CSR values to the chip in this routine - there may be
13375 * a reset following the (possible) FLR in this routine.
13376 *
13377 */
13378static void init_chip(struct hfi1_devdata *dd)
13379{
13380 int i;
13381
13382 /*
13383 * Put the HFI CSRs in a known state.
13384 * Combine this with a DC reset.
13385 *
13386 * Stop the device from doing anything while we do a
13387 * reset. We know there are no other active users of
13388 * the device since we are now in charge. Turn off
13389 * all outbound and inbound traffic and make sure
13390 * the device does not generate any interrupts.
13391 */
13392
13393 /* disable send contexts and SDMA engines */
13394 write_csr(dd, SEND_CTRL, 0);
13395 for (i = 0; i < dd->chip_send_contexts; i++)
13396 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13397 for (i = 0; i < dd->chip_sdma_engines; i++)
13398 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13399 /* disable port (turn off RXE inbound traffic) and contexts */
13400 write_csr(dd, RCV_CTRL, 0);
13401 for (i = 0; i < dd->chip_rcv_contexts; i++)
13402 write_csr(dd, RCV_CTXT_CTRL, 0);
13403 /* mask all interrupt sources */
13404 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013405 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013406
13407 /*
13408 * DC Reset: do a full DC reset before the register clear.
13409 * A recommended length of time to hold is one CSR read,
13410 * so reread the CceDcCtrl. Then, hold the DC in reset
13411 * across the clear.
13412 */
13413 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013414 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013415
13416 if (use_flr) {
13417 /*
13418 * A FLR will reset the SPC core and part of the PCIe.
13419 * The parts that need to be restored have already been
13420 * saved.
13421 */
13422 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13423
13424 /* do the FLR, the DC reset will remain */
13425 hfi1_pcie_flr(dd);
13426
13427 /* restore command and BARs */
13428 restore_pci_variables(dd);
13429
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013430 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013431 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13432 hfi1_pcie_flr(dd);
13433 restore_pci_variables(dd);
13434 }
13435
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013436 reset_asic_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013437 } else {
13438 dd_dev_info(dd, "Resetting CSRs with writes\n");
13439 reset_cce_csrs(dd);
13440 reset_txe_csrs(dd);
13441 reset_rxe_csrs(dd);
13442 reset_asic_csrs(dd);
13443 reset_misc_csrs(dd);
13444 }
13445 /* clear the DC reset */
13446 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013447
Mike Marciniszyn77241052015-07-30 15:17:43 -040013448 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013449 setextled(dd, 0);
13450
Mike Marciniszyn77241052015-07-30 15:17:43 -040013451 /*
13452 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013453 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013454 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013455 * anything plugged in constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013456 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013457 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013458 * I2CCLK and I2CDAT will change per direction, and INT_N and
13459 * MODPRS_N are input only and their value is ignored.
13460 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013461 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13462 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013463}
13464
13465static void init_early_variables(struct hfi1_devdata *dd)
13466{
13467 int i;
13468
13469 /* assign link credit variables */
13470 dd->vau = CM_VAU;
13471 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013472 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013473 dd->link_credits--;
13474 dd->vcu = cu_to_vcu(hfi1_cu);
13475 /* enough room for 8 MAD packets plus header - 17K */
13476 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13477 if (dd->vl15_init > dd->link_credits)
13478 dd->vl15_init = dd->link_credits;
13479
13480 write_uninitialized_csrs_and_memories(dd);
13481
13482 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13483 for (i = 0; i < dd->num_pports; i++) {
13484 struct hfi1_pportdata *ppd = &dd->pport[i];
13485
13486 set_partition_keys(ppd);
13487 }
13488 init_sc2vl_tables(dd);
13489}
13490
13491static void init_kdeth_qp(struct hfi1_devdata *dd)
13492{
13493 /* user changed the KDETH_QP */
13494 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13495 /* out of range or illegal value */
13496 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13497 kdeth_qp = 0;
13498 }
13499 if (kdeth_qp == 0) /* not set, or failed range check */
13500 kdeth_qp = DEFAULT_KDETH_QP;
13501
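	/* program the same prefix into both the send and receive BTH QP CSRs */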
13502 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013503 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13504 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013505
13506 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013507 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13508 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013509}
13510
13511/**
13512 * init_qpmap_table
13513 * @dd - device data
13514 * @first_ctxt - first context
13515 * @last_ctxt - last context
13516 *
13517 * This routine sets the qpn mapping table that
13518 * is indexed by qpn[8:1].
13519 *
13520 * The routine will round robin the 256 settings
13521 * from first_ctxt to last_ctxt.
13522 *
13523 * The first/last looks ahead to having specialized
13524 * receive contexts for mgmt and bypass. Normal
13525 * verbs traffic is assumed to be on a range
13526 * of receive contexts.
13527 */
13528static void init_qpmap_table(struct hfi1_devdata *dd,
13529 u32 first_ctxt,
13530 u32 last_ctxt)
13531{
13532 u64 reg = 0;
13533 u64 regno = RCV_QP_MAP_TABLE;
13534 int i;
13535 u64 ctxt = first_ctxt;
13536
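	/*
	 * Each RcvQPMapTable CSR holds eight one-byte context entries,
	 * so a full register is flushed every 8 of the 256 slots.
	 */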
13537 for (i = 0; i < 256;) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013538 reg |= ctxt << (8 * (i % 8));
13539 i++;
13540 ctxt++;
13541 if (ctxt > last_ctxt)
13542 ctxt = first_ctxt;
13543 if (i % 8 == 0) {
13544 write_csr(dd, regno, reg);
13545 reg = 0;
13546 regno += 8;
13547 }
13548 }
13549 if (i % 8)
13550 write_csr(dd, regno, reg);
13551
13552 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13553 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13554}
13555
13556/**
13557 * init_qos - init RX qos
13558 * @dd - device data
13559 * @first_context
13560 *
13561 * This routine initializes Rule 0 and the
13562 * RSM map table to implement qos.
13563 *
13564 * If all of the limit tests succeed,
13565 * qos is applied based on the array
13566 * interpretation of krcvqs where
13567 * entry 0 is VL0.
13568 *
13569 * The number of vl bits (n) and the number of qpn
13570 * bits (m) are computed to feed both the RSM map table
13571 * and the single rule.
13572 *
13573 */
13574static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13575{
13576 u8 max_by_vl = 0;
13577 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13578 u64 *rsmmap;
13579 u64 reg;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013580 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013581
13582 /* validate */
13583 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13584 num_vls == 1 ||
13585 krcvqsset <= 1)
13586 goto bail;
13587 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13588 if (krcvqs[i] > max_by_vl)
13589 max_by_vl = krcvqs[i];
13590 if (max_by_vl > 32)
13591 goto bail;
13592 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13593 /* determine bits vl */
13594 n = ilog2(num_vls);
13595 /* determine bits for qpn */
13596 m = ilog2(qpns_per_vl);
13597 if ((m + n) > 7)
13598 goto bail;
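	/*
	 * The map index below is built from m QPN bits and n VL bits,
	 * so it stays under 2^(m + n) <= 128, within the RSM map table.
	 */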
13599 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13600 goto bail;
13601 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013602 if (!rsmmap)
13603 goto bail;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013604 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13605 /* init the local copy of the table */
13606 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13607 unsigned tctxt;
13608
13609 for (qpn = 0, tctxt = ctxt;
13610 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13611 unsigned idx, regoff, regidx;
13612
13613 /* generate index <= 128 */
13614 idx = (qpn << n) ^ i;
13615 regoff = (idx % 8) * 8;
13616 regidx = idx / 8;
13617 reg = rsmmap[regidx];
13618 /* replace 0xff with context number */
13619 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13620 << regoff);
13621 reg |= (u64)(tctxt++) << regoff;
13622 rsmmap[regidx] = reg;
13623 if (tctxt == ctxt + krcvqs[i])
13624 tctxt = ctxt;
13625 }
13626 ctxt += krcvqs[i];
13627 }
13628 /* flush cached copies to chip */
13629 for (i = 0; i < NUM_MAP_REGS; i++)
13630 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13631 /* add rule0 */
13632 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
Jubin John17fb4f22016-02-14 20:21:52 -080013633 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK <<
13634 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13635 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013636 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
Jubin John17fb4f22016-02-14 20:21:52 -080013637 LRH_BTH_MATCH_OFFSET << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13638 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13639 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13640 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13641 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13642 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013643 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
Jubin John17fb4f22016-02-14 20:21:52 -080013644 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13645 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13646 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13647 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013648 /* Enable RSM */
13649 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13650 kfree(rsmmap);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013651 /* map everything else to first context */
13652 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013653 dd->qos_shift = n + 1;
13654 return;
13655bail:
13656 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013657 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013658}
13659
13660static void init_rxe(struct hfi1_devdata *dd)
13661{
13662 /* enable all receive errors */
13663 write_csr(dd, RCV_ERR_MASK, ~0ull);
13664 /* setup QPN map table - start where VL15 context leaves off */
Jubin John17fb4f22016-02-14 20:21:52 -080013665 init_qos(dd, dd->n_krcv_queues > MIN_KERNEL_KCTXTS ?
13666 MIN_KERNEL_KCTXTS : 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013667 /*
13668 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13669 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13670 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13671 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13672 * Max_PayLoad_Size set to its minimum of 128.
13673 *
13674 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13675 * (64 bytes). Max_Payload_Size is possibly modified upward in
13676 * tune_pcie_caps() which is called after this routine.
13677 */
13678}
13679
13680static void init_other(struct hfi1_devdata *dd)
13681{
13682 /* enable all CCE errors */
13683 write_csr(dd, CCE_ERR_MASK, ~0ull);
13684 /* enable *some* Misc errors */
13685 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13686 /* enable all DC errors, except LCB */
13687 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13688 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13689}
13690
13691/*
13692 * Fill out the given AU table using the given CU. A CU is defined in terms
13693 * of AUs. The table is an encoding: given the index, how many AUs does that
13694 * represent?
13695 *
13696 * NOTE: Assumes that the register layout is the same for the
13697 * local and remote tables.
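 * For example, with cu == 1 the encoded entries are 0, 1, 2, 4, 8,
 * 16, 32, and 64 AUs for indices 0 through 7.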
13698 */
13699static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13700 u32 csr0to3, u32 csr4to7)
13701{
13702 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080013703 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13704 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13705 2ull * cu <<
13706 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13707 4ull * cu <<
13708 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013709 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080013710 8ull * cu <<
13711 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13712 16ull * cu <<
13713 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13714 32ull * cu <<
13715 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13716 64ull * cu <<
13717 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013718}
13719
13720static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13721{
13722 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080013723 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013724}
13725
13726void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13727{
13728 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080013729 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013730}
13731
13732static void init_txe(struct hfi1_devdata *dd)
13733{
13734 int i;
13735
13736 /* enable all PIO, SDMA, general, and Egress errors */
13737 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13738 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13739 write_csr(dd, SEND_ERR_MASK, ~0ull);
13740 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13741
13742 /* enable all per-context and per-SDMA engine errors */
13743 for (i = 0; i < dd->chip_send_contexts; i++)
13744 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13745 for (i = 0; i < dd->chip_sdma_engines; i++)
13746 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13747
13748 /* set the local CU to AU mapping */
13749 assign_local_cm_au_table(dd, dd->vcu);
13750
13751 /*
13752 * Set reasonable default for Credit Return Timer
13753 * Don't set on Simulator - causes it to choke.
13754 */
13755 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13756 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13757}
13758
13759int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13760{
13761 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13762 unsigned sctxt;
13763 int ret = 0;
13764 u64 reg;
13765
13766 if (!rcd || !rcd->sc) {
13767 ret = -EINVAL;
13768 goto done;
13769 }
13770 sctxt = rcd->sc->hw_context;
13771 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13772 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13773 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13774 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13775 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13776 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13777 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13778 /*
13779 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040013780 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013781 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013782 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13783 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13784 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13785 }
13786
13787 /* Enable J_KEY check on receive context. */
13788 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13789 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13790 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13791 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13792done:
13793 return ret;
13794}
13795
13796int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13797{
13798 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13799 unsigned sctxt;
13800 int ret = 0;
13801 u64 reg;
13802
13803 if (!rcd || !rcd->sc) {
13804 ret = -EINVAL;
13805 goto done;
13806 }
13807 sctxt = rcd->sc->hw_context;
13808 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13809 /*
13810 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13811 * This check would not have been enabled for A0 h/w, see
13812 * set_ctxt_jkey().
13813 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013814 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013815 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13816 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13817 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13818 }
13819 /* Turn off the J_KEY on the receive side */
13820 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13821done:
13822 return ret;
13823}
13824
13825int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13826{
13827 struct hfi1_ctxtdata *rcd;
13828 unsigned sctxt;
13829 int ret = 0;
13830 u64 reg;
13831
Jubin Johne4909742016-02-14 20:22:00 -080013832 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013833 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080013834 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013835 ret = -EINVAL;
13836 goto done;
13837 }
13838 if (!rcd || !rcd->sc) {
13839 ret = -EINVAL;
13840 goto done;
13841 }
13842 sctxt = rcd->sc->hw_context;
13843 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13844 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13845 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13846 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13847 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13848 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13849done:
13850 return ret;
13851}
13852
13853int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13854{
13855 struct hfi1_ctxtdata *rcd;
13856 unsigned sctxt;
13857 int ret = 0;
13858 u64 reg;
13859
Jubin Johne4909742016-02-14 20:22:00 -080013860 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013861 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080013862 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013863 ret = -EINVAL;
13864 goto done;
13865 }
13866 if (!rcd || !rcd->sc) {
13867 ret = -EINVAL;
13868 goto done;
13869 }
13870 sctxt = rcd->sc->hw_context;
13871 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13872 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13873 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13874 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13875done:
13876 return ret;
13877}
13878
13879/*
 13880 * Start doing the clean up of the chip. Our clean up happens in multiple
13881 * stages and this is just the first.
13882 */
13883void hfi1_start_cleanup(struct hfi1_devdata *dd)
13884{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080013885 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013886 free_cntrs(dd);
13887 free_rcverr(dd);
13888 clean_up_interrupts(dd);
13889}
13890
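/*
 * The two HFIs on one ASIC are expected to have GUIDs that differ only
 * in the bit at GUID_HFI_INDEX_SHIFT; masking it off yields a per-ASIC
 * base GUID used below to find the peer device.
 */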
13891#define HFI_BASE_GUID(dev) \
13892 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13893
13894/*
13895 * Certain chip functions need to be initialized only once per asic
13896 * instead of per-device. This function finds the peer device and
13897 * checks whether that chip initialization needs to be done by this
13898 * device.
13899 */
13900static void asic_should_init(struct hfi1_devdata *dd)
13901{
13902 unsigned long flags;
13903 struct hfi1_devdata *tmp, *peer = NULL;
13904
13905 spin_lock_irqsave(&hfi1_devs_lock, flags);
13906 /* Find our peer device */
13907 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13908 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13909 dd->unit != tmp->unit) {
13910 peer = tmp;
13911 break;
13912 }
13913 }
13914
13915 /*
13916 * "Claim" the ASIC for initialization if it hasn't been
13917 " "claimed" yet.
13918 */
13919 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13920 dd->flags |= HFI1_DO_INIT_ASIC;
13921 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13922}
13923
Dean Luick5d9157a2015-11-16 21:59:34 -050013924/*
13925 * Set dd->boardname. Use a generic name if a name is not returned from
13926 * EFI variable space.
13927 *
13928 * Return 0 on success, -ENOMEM if space could not be allocated.
13929 */
13930static int obtain_boardname(struct hfi1_devdata *dd)
13931{
13932 /* generic board description */
13933 const char generic[] =
13934 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13935 unsigned long size;
13936 int ret;
13937
13938 ret = read_hfi1_efi_var(dd, "description", &size,
13939 (void **)&dd->boardname);
13940 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080013941 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050013942 /* use generic description */
13943 dd->boardname = kstrdup(generic, GFP_KERNEL);
13944 if (!dd->boardname)
13945 return -ENOMEM;
13946 }
13947 return 0;
13948}
13949
Mike Marciniszyn77241052015-07-30 15:17:43 -040013950/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013951 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013952 * @pdev: the pci_dev for hfi1_ib device
13953 * @ent: pci_device_id struct for this dev
13954 *
13955 * Also allocates, initializes, and returns the devdata struct for this
13956 * device instance
13957 *
13958 * This is global, and is called directly at init to set up the
13959 * chip-specific function pointers for later use.
13960 */
13961struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13962 const struct pci_device_id *ent)
13963{
13964 struct hfi1_devdata *dd;
13965 struct hfi1_pportdata *ppd;
13966 u64 reg;
13967 int i, ret;
13968 static const char * const inames[] = { /* implementation names */
13969 "RTL silicon",
13970 "RTL VCS simulation",
13971 "RTL FPGA emulation",
13972 "Functional simulator"
13973 };
13974
Jubin John17fb4f22016-02-14 20:21:52 -080013975 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
13976 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040013977 if (IS_ERR(dd))
13978 goto bail;
13979 ppd = dd->pport;
13980 for (i = 0; i < dd->num_pports; i++, ppd++) {
13981 int vl;
13982 /* init common fields */
13983 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13984 /* DC supports 4 link widths */
13985 ppd->link_width_supported =
13986 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13987 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13988 ppd->link_width_downgrade_supported =
13989 ppd->link_width_supported;
13990 /* start out enabling only 4X */
13991 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13992 ppd->link_width_downgrade_enabled =
13993 ppd->link_width_downgrade_supported;
13994 /* link width active is 0 when link is down */
13995 /* link width downgrade active is 0 when link is down */
13996
Jubin Johnd0d236e2016-02-14 20:20:15 -080013997 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
13998 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013999 hfi1_early_err(&pdev->dev,
14000 "Invalid num_vls %u, using %u VLs\n",
14001 num_vls, HFI1_MAX_VLS_SUPPORTED);
14002 num_vls = HFI1_MAX_VLS_SUPPORTED;
14003 }
14004 ppd->vls_supported = num_vls;
14005 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014006 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014007 /* Set the default MTU. */
14008 for (vl = 0; vl < num_vls; vl++)
14009 dd->vld[vl].mtu = hfi1_max_mtu;
14010 dd->vld[15].mtu = MAX_MAD_PACKET;
14011 /*
 14012 * Set the initial values to reasonable defaults; they will be set
 14013 * for real when the link is up.
14014 */
14015 ppd->lstate = IB_PORT_DOWN;
14016 ppd->overrun_threshold = 0x4;
14017 ppd->phy_error_threshold = 0xf;
14018 ppd->port_crc_mode_enabled = link_crc_mask;
14019 /* initialize supported LTP CRC mode */
14020 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14021 /* initialize enabled LTP CRC mode */
14022 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14023 /* start in offline */
14024 ppd->host_link_state = HLS_DN_OFFLINE;
14025 init_vl_arb_caches(ppd);
Dean Luickf45c8dc2016-02-03 14:35:31 -080014026 ppd->last_pstate = 0xff; /* invalid value */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014027 }
14028
14029 dd->link_default = HLS_DN_POLL;
14030
14031 /*
14032 * Do remaining PCIe setup and save PCIe values in dd.
14033 * Any error printing is already done by the init code.
14034 * On return, we have the chip mapped.
14035 */
14036 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14037 if (ret < 0)
14038 goto bail_free;
14039
14040 /* verify that reads actually work, save revision for reset check */
14041 dd->revision = read_csr(dd, CCE_REVISION);
14042 if (dd->revision == ~(u64)0) {
14043 dd_dev_err(dd, "cannot read chip CSRs\n");
14044 ret = -EINVAL;
14045 goto bail_cleanup;
14046 }
14047 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14048 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14049 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14050 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14051
Jubin John4d114fd2016-02-14 20:21:43 -080014052 /*
14053 * obtain the hardware ID - NOT related to unit, which is a
14054 * software enumeration
14055 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014056 reg = read_csr(dd, CCE_REVISION2);
14057 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14058 & CCE_REVISION2_HFI_ID_MASK;
14059 /* the variable size will remove unwanted bits */
14060 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14061 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14062 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014063 dd->icode < ARRAY_SIZE(inames) ?
14064 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014065
14066 /* speeds the hardware can support */
14067 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14068 /* speeds allowed to run at */
14069 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14070 /* give a reasonable active value, will be set on link up */
14071 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14072
14073 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14074 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14075 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14076 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14077 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14078 /* fix up link widths for emulation _p */
14079 ppd = dd->pport;
14080 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14081 ppd->link_width_supported =
14082 ppd->link_width_enabled =
14083 ppd->link_width_downgrade_supported =
14084 ppd->link_width_downgrade_enabled =
14085 OPA_LINK_WIDTH_1X;
14086 }
 14087 /* ensure num_vls isn't larger than the number of sdma engines */
14088 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14089 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014090 num_vls, dd->chip_sdma_engines);
14091 num_vls = dd->chip_sdma_engines;
14092 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014093 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014094 }
14095
14096 /*
14097 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14098 * Limit the max if larger than the field holds. If timeout is
14099 * non-zero, then the calculated field will be at least 1.
14100 *
14101 * Must be after icode is set up - the cclock rate depends
14102 * on knowing the hardware being used.
14103 */
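	/*
	 * Illustrative numbers only: with a nominal 500 MHz cclock (2 ns
	 * per cycle), a timeout of, say, 840 ns becomes 840 / 2 = 420
	 * cclocks, i.e. a CSR value of 420 / 64 = 6.  The real conversion
	 * always goes through ns_to_cclock(), which knows the actual rate
	 * for the hardware (or simulator) in use.
	 */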
14104 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14105 if (dd->rcv_intr_timeout_csr >
14106 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14107 dd->rcv_intr_timeout_csr =
14108 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14109 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14110 dd->rcv_intr_timeout_csr = 1;
14111
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014112 /* needs to be done before we look for the peer device */
14113 read_guid(dd);
14114
14115 /* should this device init the ASIC block? */
14116 asic_should_init(dd);
14117
Mike Marciniszyn77241052015-07-30 15:17:43 -040014118 /* obtain chip sizes, reset chip CSRs */
14119 init_chip(dd);
14120
14121 /* read in the PCIe link speed information */
14122 ret = pcie_speeds(dd);
14123 if (ret)
14124 goto bail_cleanup;
14125
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014126 /* Needs to be called before hfi1_firmware_init */
14127 get_platform_config(dd);
14128
Mike Marciniszyn77241052015-07-30 15:17:43 -040014129 /* read in firmware */
14130 ret = hfi1_firmware_init(dd);
14131 if (ret)
14132 goto bail_cleanup;
14133
14134 /*
14135 * In general, the PCIe Gen3 transition must occur after the
14136 * chip has been idled (so it won't initiate any PCIe transactions
14137 * e.g. an interrupt) and before the driver changes any registers
14138 * (the transition will reset the registers).
14139 *
14140 * In particular, place this call after:
14141 * - init_chip() - the chip will not initiate any PCIe transactions
14142 * - pcie_speeds() - reads the current link speed
14143 * - hfi1_firmware_init() - the needed firmware is ready to be
14144 * downloaded
14145 */
14146 ret = do_pcie_gen3_transition(dd);
14147 if (ret)
14148 goto bail_cleanup;
14149
14150 /* start setting dd values and adjusting CSRs */
14151 init_early_variables(dd);
14152
14153 parse_platform_config(dd);
14154
Dean Luick5d9157a2015-11-16 21:59:34 -050014155 ret = obtain_boardname(dd);
14156 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014157 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014158
14159 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014160 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014161 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014162 (u32)dd->majrev,
14163 (u32)dd->minrev,
14164 (dd->revision >> CCE_REVISION_SW_SHIFT)
14165 & CCE_REVISION_SW_MASK);
14166
14167 ret = set_up_context_variables(dd);
14168 if (ret)
14169 goto bail_cleanup;
14170
14171 /* set initial RXE CSRs */
14172 init_rxe(dd);
14173 /* set initial TXE CSRs */
14174 init_txe(dd);
14175 /* set initial non-RXE, non-TXE CSRs */
14176 init_other(dd);
14177 /* set up KDETH QP prefix in both RX and TX CSRs */
14178 init_kdeth_qp(dd);
14179
Mitko Haralanov957558c2016-02-03 14:33:40 -080014180 ret = hfi1_dev_affinity_init(dd);
14181 if (ret)
14182 goto bail_cleanup;
14183
Mike Marciniszyn77241052015-07-30 15:17:43 -040014184 /* send contexts must be set up before receive contexts */
14185 ret = init_send_contexts(dd);
14186 if (ret)
14187 goto bail_cleanup;
14188
14189 ret = hfi1_create_ctxts(dd);
14190 if (ret)
14191 goto bail_cleanup;
14192
14193 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14194 /*
14195 * rcd[0] is guaranteed to be valid by this point. Also, all
 14196 * contexts are using the same value, as per the module parameter.
14197 */
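	/*
	 * Note the operator precedence: this is rcvhdrqentsize - 2 (in
	 * dwords), i.e. the RHF is assumed to occupy the last two dwords
	 * of each receive header queue entry.
	 */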
14198 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14199
14200 ret = init_pervl_scs(dd);
14201 if (ret)
14202 goto bail_cleanup;
14203
14204 /* sdma init */
14205 for (i = 0; i < dd->num_pports; ++i) {
14206 ret = sdma_init(dd, i);
14207 if (ret)
14208 goto bail_cleanup;
14209 }
14210
14211 /* use contexts created by hfi1_create_ctxts */
14212 ret = set_up_interrupts(dd);
14213 if (ret)
14214 goto bail_cleanup;
14215
14216 /* set up LCB access - must be after set_up_interrupts() */
14217 init_lcb_access(dd);
14218
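	/*
	 * The printable serial number is taken from the low 24 bits of the
	 * GUID; the OUI bytes come from the top of the GUID just below.
	 */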
14219 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14220 dd->base_guid & 0xFFFFFF);
14221
14222 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14223 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14224 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14225
14226 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14227 if (ret)
14228 goto bail_clear_intr;
14229 check_fabric_firmware_versions(dd);
14230
14231 thermal_init(dd);
14232
14233 ret = init_cntrs(dd);
14234 if (ret)
14235 goto bail_clear_intr;
14236
14237 ret = init_rcverr(dd);
14238 if (ret)
14239 goto bail_free_cntrs;
14240
14241 ret = eprom_init(dd);
14242 if (ret)
14243 goto bail_free_rcverr;
14244
14245 goto bail;
14246
14247bail_free_rcverr:
14248 free_rcverr(dd);
14249bail_free_cntrs:
14250 free_cntrs(dd);
14251bail_clear_intr:
14252 clean_up_interrupts(dd);
14253bail_cleanup:
14254 hfi1_pcie_ddcleanup(dd);
14255bail_free:
14256 hfi1_free_devdata(dd);
14257 dd = ERR_PTR(ret);
14258bail:
14259 return dd;
14260}
14261
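/*
 * Static rate control helper: return how many extra fabric clock cycles
 * a packet of @dw_len dwords must be held back so that it egresses at no
 * more than @desired_egress_rate (Mb/s).  Assumes egress_cycles()
 * returns the cycles needed to emit the given byte count at the given
 * rate; the difference between the slower (desired) and faster (current)
 * rate is the stall to insert.  A desired rate at or above the current
 * link rate needs no delay.
 */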
14262static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14263 u32 dw_len)
14264{
14265 u32 delta_cycles;
14266 u32 current_egress_rate = ppd->current_egress_rate;
14267 /* rates here are in units of 10^6 bits/sec */
14268
14269 if (desired_egress_rate == -1)
14270 return 0; /* shouldn't happen */
14271
14272 if (desired_egress_rate >= current_egress_rate)
 14273 return 0; /* we can't help it go faster, only slower */
14274
14275 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14276 egress_cycles(dw_len * 4, current_egress_rate);
14277
14278 return (u16)delta_cycles;
14279}
14280
Mike Marciniszyn77241052015-07-30 15:17:43 -040014281/**
14282 * create_pbc - build a pbc for transmission
14283 * @flags: special case flags or-ed in built pbc
 14284 * @srate_mbs: static rate in Mb/s
 14285 * @vl: virtual lane
 14286 * @dw_len: dword length (header words + data words + pbc words)
14287 *
14288 * Create a PBC with the given flags, rate, VL, and length.
14289 *
14290 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14291 * for verbs, which does not use this PSM feature. The lone other caller
14292 * is for the diagnostic interface which calls this if the user does not
14293 * supply their own PBC.
14294 */
14295u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14296 u32 dw_len)
14297{
14298 u64 pbc, delay = 0;
14299
14300 if (unlikely(srate_mbs))
14301 delay = delay_cycles(ppd, srate_mbs, dw_len);
14302
14303 pbc = flags
14304 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14305 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14306 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14307 | (dw_len & PBC_LENGTH_DWS_MASK)
14308 << PBC_LENGTH_DWS_SHIFT;
14309
14310 return pbc;
14311}
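
/*
 * Usage note (illustrative, not taken from any caller in this file):
 * a verbs send of 32 dwords on VL0 with no static rate limiting could
 * build its PBC as create_pbc(ppd, 0, 0, 0, 32); srate_mbs == 0 skips
 * the delay_cycles() calculation entirely.
 */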
14312
14313#define SBUS_THERMAL 0x4f
14314#define SBUS_THERM_MONITOR_MODE 0x1
14315
14316#define THERM_FAILURE(dev, ret, reason) \
14317 dd_dev_err((dd), \
14318 "Thermal sensor initialization failed: %s (%d)\n", \
14319 (reason), (ret))
14320
14321/*
14322 * Initialize the Avago Thermal sensor.
14323 *
 14324 * After initialization, enable polling of the thermal sensor through
 14325 * the SBus interface. For this to work, the SBus Master firmware
 14326 * has to be loaded, because the HW polling logic uses SBus
 14327 * interrupts, which are not supported by the default firmware.
 14328 * Otherwise, no data will be returned through
14329 * the ASIC_STS_THERM CSR.
14330 */
14331static int thermal_init(struct hfi1_devdata *dd)
14332{
14333 int ret = 0;
14334
14335 if (dd->icode != ICODE_RTL_SILICON ||
14336 !(dd->flags & HFI1_DO_INIT_ASIC))
14337 return ret;
14338
14339 acquire_hw_mutex(dd);
14340 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014341 /* Disable polling of thermal readings */
14342 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14343 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014344 /* Thermal Sensor Initialization */
14345 /* Step 1: Reset the Thermal SBus Receiver */
14346 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14347 RESET_SBUS_RECEIVER, 0);
14348 if (ret) {
14349 THERM_FAILURE(dd, ret, "Bus Reset");
14350 goto done;
14351 }
14352 /* Step 2: Set Reset bit in Thermal block */
14353 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14354 WRITE_SBUS_RECEIVER, 0x1);
14355 if (ret) {
14356 THERM_FAILURE(dd, ret, "Therm Block Reset");
14357 goto done;
14358 }
14359 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
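	/* 0x32 == 50: dividing the 100 MHz reference by 50 gives 2 MHz */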
14360 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14361 WRITE_SBUS_RECEIVER, 0x32);
14362 if (ret) {
14363 THERM_FAILURE(dd, ret, "Write Clock Div");
14364 goto done;
14365 }
14366 /* Step 4: Select temperature mode */
14367 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14368 WRITE_SBUS_RECEIVER,
14369 SBUS_THERM_MONITOR_MODE);
14370 if (ret) {
14371 THERM_FAILURE(dd, ret, "Write Mode Sel");
14372 goto done;
14373 }
14374 /* Step 5: De-assert block reset and start conversion */
14375 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14376 WRITE_SBUS_RECEIVER, 0x2);
14377 if (ret) {
14378 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14379 goto done;
14380 }
14381 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14382 msleep(22);
14383
14384 /* Enable polling of thermal readings */
14385 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14386done:
14387 release_hw_mutex(dd);
14388 return ret;
14389}
14390
14391static void handle_temp_err(struct hfi1_devdata *dd)
14392{
14393 struct hfi1_pportdata *ppd = &dd->pport[0];
14394 /*
14395 * Thermal Critical Interrupt
14396 * Put the device into forced freeze mode, take link down to
14397 * offline, and put DC into reset.
14398 */
14399 dd_dev_emerg(dd,
14400 "Critical temperature reached! Forcing device into freeze mode!\n");
14401 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080014402 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014403 /*
14404 * Shut DC down as much and as quickly as possible.
14405 *
14406 * Step 1: Take the link down to OFFLINE. This will cause the
14407 * 8051 to put the Serdes in reset. However, we don't want to
14408 * go through the entire link state machine since we want to
14409 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14410 * but rather an attempt to save the chip.
14411 * Code below is almost the same as quiet_serdes() but avoids
14412 * all the extra work and the sleeps.
14413 */
14414 ppd->driver_link_ready = 0;
14415 ppd->link_enabled = 0;
14416 set_physical_link_state(dd, PLS_OFFLINE |
14417 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14418 /*
14419 * Step 2: Shutdown LCB and 8051
14420 * After shutdown, do not restore DC_CFG_RESET value.
14421 */
14422 dc_shutdown(dd);
14423}